Diffstat (limited to 'third_party/eigen3/Eigen/src/Core/util')
-rw-r--r--  third_party/eigen3/Eigen/src/Core/util/BlasUtil.h                 237
-rw-r--r--  third_party/eigen3/Eigen/src/Core/util/Constants.h                453
-rw-r--r--  third_party/eigen3/Eigen/src/Core/util/DisableStupidWarnings.h     40
-rw-r--r--  third_party/eigen3/Eigen/src/Core/util/ForwardDeclarations.h      301
-rw-r--r--  third_party/eigen3/Eigen/src/Core/util/MKL_support.h              126
-rw-r--r--  third_party/eigen3/Eigen/src/Core/util/Macros.h                   740
-rw-r--r--  third_party/eigen3/Eigen/src/Core/util/MatrixMapper.h             155
-rw-r--r--  third_party/eigen3/Eigen/src/Core/util/Memory.h                   984
-rw-r--r--  third_party/eigen3/Eigen/src/Core/util/Meta.h                     334
-rw-r--r--  third_party/eigen3/Eigen/src/Core/util/ReenableStupidWarnings.h    14
-rw-r--r--  third_party/eigen3/Eigen/src/Core/util/StaticAssert.h             206
-rw-r--r--  third_party/eigen3/Eigen/src/Core/util/XprHelper.h                481
12 files changed, 4071 insertions, 0 deletions
diff --git a/third_party/eigen3/Eigen/src/Core/util/BlasUtil.h b/third_party/eigen3/Eigen/src/Core/util/BlasUtil.h
new file mode 100644
index 0000000000..bbaff8dd0e
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/Core/util/BlasUtil.h
@@ -0,0 +1,237 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BLASUTIL_H
+#define EIGEN_BLASUTIL_H
+
+// This file contains many lightweight helper classes used to
+// implement and control fast level 2 and level 3 BLAS-like routines.
+
+namespace Eigen {
+
+namespace internal {
+
+// forward declarations
+template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs=false, bool ConjugateRhs=false>
+struct gebp_kernel;
+
+template<typename Scalar, typename Index, typename DataMapper, int nr, int StorageOrder, bool Conjugate = false, bool PanelMode=false>
+struct gemm_pack_rhs;
+
+template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, int StorageOrder, bool Conjugate = false, bool PanelMode = false>
+struct gemm_pack_lhs;
+
+template<
+ typename Index,
+ typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
+ typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
+ int ResStorageOrder>
+struct general_matrix_matrix_product;
+
+template<typename Index, typename LhsScalar, typename LhsMapper, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version=Specialized>
+struct general_matrix_vector_product;
+
+
+template<bool Conjugate> struct conj_if;
+
+template<> struct conj_if<true> {
+ template<typename T>
+ inline T operator()(const T& x) { return numext::conj(x); }
+ template<typename T>
+ inline T pconj(const T& x) { return internal::pconj(x); }
+};
+
+template<> struct conj_if<false> {
+ template<typename T>
+ inline const T& operator()(const T& x) { return x; }
+ template<typename T>
+ inline const T& pconj(const T& x) { return x; }
+};
+
+template<typename Scalar> struct conj_helper<Scalar,Scalar,false,false>
+{
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const { return internal::pmadd(x,y,c); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const { return internal::pmul(x,y); }
+};
+
+template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, false,true>
+{
+ typedef std::complex<RealScalar> Scalar;
+ EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
+ { return c + pmul(x,y); }
+
+ EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
+ { return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::imag(x)*numext::real(y) - numext::real(x)*numext::imag(y)); }
+};
+
+template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,false>
+{
+ typedef std::complex<RealScalar> Scalar;
+ EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
+ { return c + pmul(x,y); }
+
+ EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
+ { return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }
+};
+
+template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,true>
+{
+ typedef std::complex<RealScalar> Scalar;
+ EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
+ { return c + pmul(x,y); }
+
+ EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
+ { return Scalar(numext::real(x)*numext::real(y) - numext::imag(x)*numext::imag(y), - numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }
+};
+
+template<typename RealScalar,bool Conj> struct conj_helper<std::complex<RealScalar>, RealScalar, Conj,false>
+{
+ typedef std::complex<RealScalar> Scalar;
+ EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const RealScalar& y, const Scalar& c) const
+ { return padd(c, pmul(x,y)); }
+ EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const RealScalar& y) const
+ { return conj_if<Conj>()(x)*y; }
+};
+
+template<typename RealScalar,bool Conj> struct conj_helper<RealScalar, std::complex<RealScalar>, false,Conj>
+{
+ typedef std::complex<RealScalar> Scalar;
+ EIGEN_STRONG_INLINE Scalar pmadd(const RealScalar& x, const Scalar& y, const Scalar& c) const
+ { return padd(c, pmul(x,y)); }
+ EIGEN_STRONG_INLINE Scalar pmul(const RealScalar& x, const Scalar& y) const
+ { return x*conj_if<Conj>()(y); }
+};
+
+template<typename From,typename To> struct get_factor {
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE To run(const From& x) { return x; }
+};
+
+template<typename Scalar> struct get_factor<Scalar,typename NumTraits<Scalar>::Real> {
+ EIGEN_DEVICE_FUNC
+ static EIGEN_STRONG_INLINE typename NumTraits<Scalar>::Real run(const Scalar& x) { return numext::real(x); }
+};
+
+
+/* Helper class to analyze the factors of a Product expression.
+ * In particular it allows one to pop out operator-, scalar multiples,
+ * and conjugation. */
+template<typename XprType> struct blas_traits
+{
+ typedef typename traits<XprType>::Scalar Scalar;
+ typedef const XprType& ExtractType;
+ typedef XprType _ExtractType;
+ enum {
+ IsComplex = NumTraits<Scalar>::IsComplex,
+ IsTransposed = false,
+ NeedToConjugate = false,
+ HasUsableDirectAccess = ( (int(XprType::Flags)&DirectAccessBit)
+ && ( bool(XprType::IsVectorAtCompileTime)
+ || int(inner_stride_at_compile_time<XprType>::ret) == 1)
+ ) ? 1 : 0
+ };
+ typedef typename conditional<bool(HasUsableDirectAccess),
+ ExtractType,
+ typename _ExtractType::PlainObject
+ >::type DirectLinearAccessType;
+ static inline ExtractType extract(const XprType& x) { return x; }
+ static inline const Scalar extractScalarFactor(const XprType&) { return Scalar(1); }
+};
+
+// pop conjugate
+template<typename Scalar, typename NestedXpr>
+struct blas_traits<CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+ typedef blas_traits<NestedXpr> Base;
+ typedef CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> XprType;
+ typedef typename Base::ExtractType ExtractType;
+
+ enum {
+ IsComplex = NumTraits<Scalar>::IsComplex,
+ NeedToConjugate = Base::NeedToConjugate ? 0 : IsComplex
+ };
+ static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+ static inline Scalar extractScalarFactor(const XprType& x) { return conj(Base::extractScalarFactor(x.nestedExpression())); }
+};
+
+// pop scalar multiple
+template<typename Scalar, typename NestedXpr>
+struct blas_traits<CwiseUnaryOp<scalar_multiple_op<Scalar>, NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+ typedef blas_traits<NestedXpr> Base;
+ typedef CwiseUnaryOp<scalar_multiple_op<Scalar>, NestedXpr> XprType;
+ typedef typename Base::ExtractType ExtractType;
+ static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+ static inline Scalar extractScalarFactor(const XprType& x)
+ { return x.functor().m_other * Base::extractScalarFactor(x.nestedExpression()); }
+};
+
+// pop opposite
+template<typename Scalar, typename NestedXpr>
+struct blas_traits<CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+ typedef blas_traits<NestedXpr> Base;
+ typedef CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> XprType;
+ typedef typename Base::ExtractType ExtractType;
+ static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+ static inline Scalar extractScalarFactor(const XprType& x)
+ { return - Base::extractScalarFactor(x.nestedExpression()); }
+};
+
+// pop/push transpose
+template<typename NestedXpr>
+struct blas_traits<Transpose<NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+ typedef typename NestedXpr::Scalar Scalar;
+ typedef blas_traits<NestedXpr> Base;
+ typedef Transpose<NestedXpr> XprType;
+ typedef Transpose<const typename Base::_ExtractType> ExtractType; // const to get rid of a compile error; anyway blas traits are only used on the RHS
+ typedef Transpose<const typename Base::_ExtractType> _ExtractType;
+ typedef typename conditional<bool(Base::HasUsableDirectAccess),
+ ExtractType,
+ typename ExtractType::PlainObject
+ >::type DirectLinearAccessType;
+ enum {
+ IsTransposed = Base::IsTransposed ? 0 : 1
+ };
+ static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+ static inline Scalar extractScalarFactor(const XprType& x) { return Base::extractScalarFactor(x.nestedExpression()); }
+};
+
+template<typename T>
+struct blas_traits<const T>
+ : blas_traits<T>
+{};
+
+template<typename T, bool HasUsableDirectAccess=blas_traits<T>::HasUsableDirectAccess>
+struct extract_data_selector {
+ static const typename T::Scalar* run(const T& m)
+ {
+ return blas_traits<T>::extract(m).data();
+ }
+};
+
+template<typename T>
+struct extract_data_selector<T,false> {
+ static typename T::Scalar* run(const T&) { return 0; }
+};
+
+template<typename T> const typename T::Scalar* extract_data(const T& m)
+{
+ return extract_data_selector<T>::run(m);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_BLASUTIL_H
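
To make the role of these helpers concrete, here is a minimal, self-contained sketch of how blas_traits peels a scalar factor off an expression before a product kernel runs. It assumes a C++11 compiler (for decltype) and that the Eigen headers from this tree are on the include path; the internal API used is exactly the one declared in the header above.

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXf A = Eigen::MatrixXf::Random(4, 4);
  // 2.f * A is a CwiseUnaryOp<scalar_multiple_op<float>, ...> expression;
  // blas_traits pops the factor out and exposes the plain matrix underneath.
  typedef decltype(2.f * A) Xpr;                    // C++11
  typedef Eigen::internal::blas_traits<Xpr> Traits;
  Xpr xpr = 2.f * A;
  std::cout << Traits::extractScalarFactor(xpr) << "\n";  // prints 2
  // Traits::extract(xpr) returns the underlying matrix A itself.
  return 0;
}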
diff --git a/third_party/eigen3/Eigen/src/Core/util/Constants.h b/third_party/eigen3/Eigen/src/Core/util/Constants.h
new file mode 100644
index 0000000000..be14df0168
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/Core/util/Constants.h
@@ -0,0 +1,453 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CONSTANTS_H
+#define EIGEN_CONSTANTS_H
+
+namespace Eigen {
+
+/** This value means that a positive quantity (e.g., a size) is not known at compile-time, and that instead the value is
+ * stored in some runtime variable.
+ *
+ * Changing the value of Dynamic breaks the ABI, as Dynamic is often used as a template parameter for Matrix.
+ */
+const int Dynamic = -1;
+
+/** This value means that a signed quantity (e.g., a signed index) is not known at compile-time, and that instead its value
+ * has to be specified at runtime.
+ */
+const int DynamicIndex = 0xffffff;
+
+/** This value means +Infinity; it is currently used only as the p parameter to MatrixBase::lpNorm<int>().
+ * The value Infinity there means the L-infinity norm.
+ */
+const int Infinity = -1;
+
+/** \defgroup flags Flags
+ * \ingroup Core_Module
+ *
+ * These are the possible bits which can be OR'ed to constitute the flags of a matrix or
+ * expression.
+ *
+ * It is important to note that these flags are a purely compile-time notion. They are a compile-time property of
+ * an expression type, implemented as enums. They are not stored in memory at runtime, and they do not incur any
+ * runtime overhead.
+ *
+ * \sa MatrixBase::Flags
+ */
+
+/** \ingroup flags
+ *
+ * for a matrix, this means that the storage order is row-major.
+ * If this bit is not set, the storage order is column-major.
+ * For an expression, this determines the storage order of
+ * the matrix created by evaluation of that expression.
+ * \sa \ref TopicStorageOrders */
+const unsigned int RowMajorBit = 0x1;
+
+/** \ingroup flags
+ *
+ * means the expression should be evaluated by the calling expression */
+const unsigned int EvalBeforeNestingBit = 0x2;
+
+/** \ingroup flags
+ *
+ * means the expression should be evaluated before any assignment */
+const unsigned int EvalBeforeAssigningBit = 0x4;
+
+/** \ingroup flags
+ *
+ * Short version: means the expression might be vectorized
+ *
+ * Long version: means that the coefficients can be handled by packets
+ * and start at a memory location whose alignment meets the requirements
+ * of the present CPU architecture for optimized packet access. In the fixed-size
+ * case, there is the additional condition that it be possible to access all the
+ * coefficients by packets (this implies the requirement that the size be a multiple of 16 bytes,
+ * and that any nontrivial strides don't break the alignment). In the dynamic-size case,
+ * there is no such condition on the total size and strides, so it might not be possible to access
+ * all coeffs by packets.
+ *
+ * \note This bit can be set regardless of whether vectorization is actually enabled.
+ * To check for actual vectorizability, see \a ActualPacketAccessBit.
+ */
+const unsigned int PacketAccessBit = 0x8;
+
+#ifdef EIGEN_VECTORIZE
+/** \ingroup flags
+ *
+ * If vectorization is enabled (EIGEN_VECTORIZE is defined) this constant
+ * is set to the value \a PacketAccessBit.
+ *
+ * If vectorization is not enabled (EIGEN_VECTORIZE is not defined) this constant
+ * is set to the value 0.
+ */
+const unsigned int ActualPacketAccessBit = PacketAccessBit;
+#else
+const unsigned int ActualPacketAccessBit = 0x0;
+#endif
+
+/** \ingroup flags
+ *
+ * Short version: means the expression can be seen as 1D vector.
+ *
+ * Long version: means that one can access the coefficients
+ * of this expression by coeff(int), and coeffRef(int) in the case of an lvalue expression. These
+ * index-based access methods are guaranteed
+ * to not have to do any runtime computation of a (row, col)-pair from the index, so that it
+ * is guaranteed that whenever it is available, index-based access is at least as fast as
+ * (row,col)-based access. Expressions for which that isn't possible don't have the LinearAccessBit.
+ *
+ * If both PacketAccessBit and LinearAccessBit are set, then the
+ * packets of this expression can be accessed by packet(int), and writePacket(int) in the case of a
+ * lvalue expression.
+ *
+ * Typically, all vector expressions have the LinearAccessBit, but there is one exception:
+ * Product expressions don't have it, because it would be troublesome for vectorization, even when the
+ * Product is a vector expression. Thus, vector Product expressions allow index-based coefficient access but
+ * not index-based packet access, so they don't have the LinearAccessBit.
+ */
+const unsigned int LinearAccessBit = 0x10;
+
+/** \ingroup flags
+ *
+ * Means the expression has a coeffRef() method, i.e. is writable as its individual coefficients are directly addressable.
+ * This rules out read-only expressions.
+ *
+ * Note that DirectAccessBit and LvalueBit are mutually orthogonal, as there are examples of expressions having one but not
+ * the other:
+ * \li writable expressions that don't have a very simple memory layout as a strided array have LvalueBit but not DirectAccessBit
+ * \li Map-to-const expressions, for example Map<const Matrix>, have DirectAccessBit but not LvalueBit
+ *
+ * Expressions having LvalueBit also have their coeff() method returning a const reference instead of returning a new value.
+ */
+const unsigned int LvalueBit = 0x20;
+
+/** \ingroup flags
+ *
+ * Means that the underlying array of coefficients can be directly accessed as a plain strided array. The memory layout
+ * of the array of coefficients must be exactly the natural one suggested by rows(), cols(),
+ * outerStride(), innerStride(), and the RowMajorBit. This rules out expressions such as Diagonal, whose coefficients,
+ * though referencable, do not have such a regular memory layout.
+ *
+ * See the comment on LvalueBit for an explanation of how LvalueBit and DirectAccessBit are mutually orthogonal.
+ */
+const unsigned int DirectAccessBit = 0x40;
+
+/** \ingroup flags
+ *
+ * means the first coefficient packet is guaranteed to be aligned.
+ * An expression cannot have the AlignedBit without the PacketAccessBit flag.
+ * In other words, this means we are allowed to perform an aligned packet access to the first element regardless
+ * of the expression kind:
+ * \code
+ * expression.packet<Aligned>(0);
+ * \endcode
+ */
+const unsigned int AlignedBit = 0x80;
+
+const unsigned int NestByRefBit = 0x100;
+
+// list of flags that are inherited by default
+const unsigned int HereditaryBits = RowMajorBit
+ | EvalBeforeNestingBit
+ | EvalBeforeAssigningBit;
+
+/** \defgroup enums Enumerations
+ * \ingroup Core_Module
+ *
+ * Various enumerations used in %Eigen. Many of these are used as template parameters.
+ */
+
+/** \ingroup enums
+ * Enum containing possible values for the \p Mode parameter of
+ * MatrixBase::selfadjointView() and MatrixBase::triangularView(). */
+enum {
+ /** View matrix as a lower triangular matrix. */
+ Lower=0x1,
+ /** View matrix as an upper triangular matrix. */
+ Upper=0x2,
+ /** %Matrix has ones on the diagonal; to be used in combination with #Lower or #Upper. */
+ UnitDiag=0x4,
+ /** %Matrix has zeros on the diagonal; to be used in combination with #Lower or #Upper. */
+ ZeroDiag=0x8,
+ /** View matrix as a lower triangular matrix with ones on the diagonal. */
+ UnitLower=UnitDiag|Lower,
+ /** View matrix as an upper triangular matrix with ones on the diagonal. */
+ UnitUpper=UnitDiag|Upper,
+ /** View matrix as a lower triangular matrix with zeros on the diagonal. */
+ StrictlyLower=ZeroDiag|Lower,
+ /** View matrix as an upper triangular matrix with zeros on the diagonal. */
+ StrictlyUpper=ZeroDiag|Upper,
+ /** Used in BandMatrix and SelfAdjointView to indicate that the matrix is self-adjoint. */
+ SelfAdjoint=0x10,
+ /** Used to support symmetric, non-selfadjoint, complex matrices. */
+ Symmetric=0x20
+};
+
+/** \ingroup enums
+ * Enum for indicating whether an object is aligned or not. */
+enum {
+ /** Object is not correctly aligned for vectorization. */
+ Unaligned=0,
+ /** Object is aligned for vectorization. */
+ Aligned=1
+};
+
+/** \ingroup enums
+ * Enum used by DenseBase::corner() in Eigen2 compatibility mode. */
+// FIXME after the corner() API change, this was not needed anymore, except by AlignedBox
+// TODO: find out what to do with that. Adapt the AlignedBox API ?
+enum CornerType { TopLeft, TopRight, BottomLeft, BottomRight };
+
+/** \ingroup enums
+ * Enum containing possible values for the \p Direction parameter of
+ * Reverse, PartialReduxExpr and VectorwiseOp. */
+enum DirectionType {
+ /** For Reverse, all columns are reversed;
+ * for PartialReduxExpr and VectorwiseOp, act on columns. */
+ Vertical,
+ /** For Reverse, all rows are reversed;
+ * for PartialReduxExpr and VectorwiseOp, act on rows. */
+ Horizontal,
+ /** For Reverse, both rows and columns are reversed;
+ * not used for PartialReduxExpr and VectorwiseOp. */
+ BothDirections
+};
+
+/** \internal \ingroup enums
+ * Enum to specify how to traverse the entries of a matrix. */
+enum {
+ /** \internal Default traversal, no vectorization, no index-based access */
+ DefaultTraversal,
+ /** \internal No vectorization, use index-based access to have only one for loop instead of 2 nested loops */
+ LinearTraversal,
+ /** \internal Equivalent to a slice vectorization for fixed-size matrices having good alignment
+ * and good size */
+ InnerVectorizedTraversal,
+ /** \internal Vectorization path using a single loop plus scalar loops for the
+ * unaligned boundaries */
+ LinearVectorizedTraversal,
+ /** \internal Generic vectorization path using one vectorized loop per row/column with some
+ * scalar loops to handle the unaligned boundaries */
+ SliceVectorizedTraversal,
+ /** \internal Special case to properly handle incompatible scalar types or other defective cases */
+ InvalidTraversal,
+ /** \internal Evaluate all entries at once */
+ AllAtOnceTraversal
+};
+
+/** \internal \ingroup enums
+ * Enum to specify whether to unroll loops when traversing over the entries of a matrix. */
+enum {
+ /** \internal Do not unroll loops. */
+ NoUnrolling,
+ /** \internal Unroll only the inner loop, but not the outer loop. */
+ InnerUnrolling,
+ /** \internal Unroll both the inner and the outer loop. If there is only one loop,
+ * because linear traversal is used, then unroll that loop. */
+ CompleteUnrolling
+};
+
+/** \internal \ingroup enums
+ * Enum to specify whether to use the default (built-in) implementation or the specialization. */
+enum {
+ Specialized,
+ BuiltIn
+};
+
+/** \ingroup enums
+ * Enum containing possible values for the \p _Options template parameter of
+ * Matrix, Array and BandMatrix. */
+enum {
+ /** Storage order is column major (see \ref TopicStorageOrders). */
+ ColMajor = 0,
+ /** Storage order is row major (see \ref TopicStorageOrders). */
+ RowMajor = 0x1, // it is only a coincidence that this is equal to RowMajorBit -- don't rely on that
+ /** Align the matrix itself if it is vectorizable fixed-size */
+ AutoAlign = 0,
+ /** Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be requested to be aligned) */ // FIXME --- clarify the situation
+ DontAlign = 0x2,
+ AllocateDefault = 0,
+ AllocateUVM = 0x8
+};
+
+/** \ingroup enums
+ * Enum for specifying whether to apply or solve on the left or right. */
+enum {
+ /** Apply transformation on the left. */
+ OnTheLeft = 1,
+ /** Apply transformation on the right. */
+ OnTheRight = 2
+};
+
+/* the following used to be written as:
+ *
+ * struct NoChange_t {};
+ * namespace {
+ * EIGEN_UNUSED NoChange_t NoChange;
+ * }
+ *
+ * on the ground that it feels dangerous to disambiguate overloaded functions on enum/integer types.
+ * However, this leads to "variable declared but never referenced" warnings on Intel Composer XE,
+ * and we do not know how to get rid of them (bug 450).
+ */
+
+enum NoChange_t { NoChange };
+enum Sequential_t { Sequential };
+enum Default_t { Default };
+
+/** \internal \ingroup enums
+ * Used in AmbiVector. */
+enum {
+ IsDense = 0,
+ IsSparse
+};
+
+/** \ingroup enums
+ * Used as template parameter in DenseCoeffBase and MapBase to indicate
+ * which accessors should be provided. */
+enum AccessorLevels {
+ /** Read-only access via a member function. */
+ ReadOnlyAccessors,
+ /** Read/write access via member functions. */
+ WriteAccessors,
+ /** Direct read-only access to the coefficients. */
+ DirectAccessors,
+ /** Direct read/write access to the coefficients. */
+ DirectWriteAccessors
+};
+
+/** \ingroup enums
+ * Enum with options to give to various decompositions. */
+enum DecompositionOptions {
+ /** \internal Not used (meant for LDLT?). */
+ Pivoting = 0x01,
+ /** \internal Not used (meant for LDLT?). */
+ NoPivoting = 0x02,
+ /** Used in JacobiSVD to indicate that the square matrix U is to be computed. */
+ ComputeFullU = 0x04,
+ /** Used in JacobiSVD to indicate that the thin matrix U is to be computed. */
+ ComputeThinU = 0x08,
+ /** Used in JacobiSVD to indicate that the square matrix V is to be computed. */
+ ComputeFullV = 0x10,
+ /** Used in JacobiSVD to indicate that the thin matrix V is to be computed. */
+ ComputeThinV = 0x20,
+ /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify
+ * that only the eigenvalues are to be computed and not the eigenvectors. */
+ EigenvaluesOnly = 0x40,
+ /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify
+ * that both the eigenvalues and the eigenvectors are to be computed. */
+ ComputeEigenvectors = 0x80,
+ /** \internal */
+ EigVecMask = EigenvaluesOnly | ComputeEigenvectors,
+ /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
+ * solve the generalized eigenproblem \f$ Ax = \lambda B x \f$. */
+ Ax_lBx = 0x100,
+ /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
+ * solve the generalized eigenproblem \f$ ABx = \lambda x \f$. */
+ ABx_lx = 0x200,
+ /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
+ * solve the generalized eigenproblem \f$ BAx = \lambda x \f$. */
+ BAx_lx = 0x400,
+ /** \internal */
+ GenEigMask = Ax_lBx | ABx_lx | BAx_lx
+};
+
+/** \ingroup enums
+ * Possible values for the \p QRPreconditioner template parameter of JacobiSVD. */
+enum QRPreconditioners {
+ /** Do not specify what is to be done if the SVD of a non-square matrix is asked for. */
+ NoQRPreconditioner,
+ /** Use a QR decomposition without pivoting as the first step. */
+ HouseholderQRPreconditioner,
+ /** Use a QR decomposition with column pivoting as the first step. */
+ ColPivHouseholderQRPreconditioner,
+ /** Use a QR decomposition with full pivoting as the first step. */
+ FullPivHouseholderQRPreconditioner
+};
+
+#ifdef Success
+#error The preprocessor symbol 'Success' is defined, possibly by the X11 header file X.h
+#endif
+
+/** \ingroup enums
+ * Enum for reporting the status of a computation. */
+enum ComputationInfo {
+ /** Computation was successful. */
+ Success = 0,
+ /** The provided data did not satisfy the prerequisites. */
+ NumericalIssue = 1,
+ /** Iterative procedure did not converge. */
+ NoConvergence = 2,
+ /** The inputs are invalid, or the algorithm has been improperly called.
+ * When assertions are enabled, such errors trigger an assert. */
+ InvalidInput = 3
+};
+
+/** \ingroup enums
+ * Enum used to specify how a particular transformation is stored in a matrix.
+ * \sa Transform, Hyperplane::transform(). */
+enum TransformTraits {
+ /** Transformation is an isometry. */
+ Isometry = 0x1,
+ /** Transformation is an affine transformation stored as a (Dim+1)^2 matrix whose last row is
+ * assumed to be [0 ... 0 1]. */
+ Affine = 0x2,
+ /** Transformation is an affine transformation stored as a (Dim) x (Dim+1) matrix. */
+ AffineCompact = 0x10 | Affine,
+ /** Transformation is a general projective transformation stored as a (Dim+1)^2 matrix. */
+ Projective = 0x20
+};
+
+/** \internal \ingroup enums
+ * Enum used to choose between implementation depending on the computer architecture. */
+namespace Architecture
+{
+ enum Type {
+ Generic = 0x0,
+ SSE = 0x1,
+ AltiVec = 0x2,
+ VSX = 0x3,
+ NEON = 0x4,
+#if defined EIGEN_VECTORIZE_SSE
+ Target = SSE
+#elif defined EIGEN_VECTORIZE_ALTIVEC
+ Target = AltiVec
+#elif defined EIGEN_VECTORIZE_VSX
+ Target = VSX
+#elif defined EIGEN_VECTORIZE_NEON
+ Target = NEON
+#else
+ Target = Generic
+#endif
+ };
+}
+
+/** \internal \ingroup enums
+ * Enum used as template parameter in GeneralProduct. */
+enum { CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct };
+
+/** \internal \ingroup enums
+ * Enum used in experimental parallel implementation. */
+enum Action {GetAction, SetAction};
+
+/** The type used to identify a dense storage. */
+struct Dense {};
+
+/** The type used to identify a matrix expression */
+struct MatrixXpr {};
+
+/** The type used to identify an array expression */
+struct ArrayXpr {};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CONSTANTS_H
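
To ground a few of these constants: RowMajorBit can be tested against a type's compile-time Flags, Lower selects a triangular view, and the ComputeFull* options drive JacobiSVD. A minimal sketch, assuming Eigen is on the include path:

#include <Eigen/Dense>
#include <iostream>

int main() {
  // RowMajorBit is a compile-time property carried in Flags.
  typedef Eigen::Matrix<float, 3, 3, Eigen::RowMajor> RowMat;
  std::cout << bool(RowMat::Flags & Eigen::RowMajorBit) << "\n";          // 1
  std::cout << bool(Eigen::Matrix3f::Flags & Eigen::RowMajorBit) << "\n"; // 0

  // Lower is one of the Mode values accepted by triangularView().
  Eigen::Matrix3f m = Eigen::Matrix3f::Random();
  std::cout << m.triangularView<Eigen::Lower>().toDenseMatrix() << "\n";

  // ComputeFullU/ComputeFullV are DecompositionOptions for JacobiSVD.
  Eigen::JacobiSVD<Eigen::Matrix3f> svd(m, Eigen::ComputeFullU | Eigen::ComputeFullV);
  std::cout << svd.singularValues().transpose() << "\n";
  return 0;
}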
diff --git a/third_party/eigen3/Eigen/src/Core/util/DisableStupidWarnings.h b/third_party/eigen3/Eigen/src/Core/util/DisableStupidWarnings.h
new file mode 100644
index 0000000000..6a0bf0629c
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/Core/util/DisableStupidWarnings.h
@@ -0,0 +1,40 @@
+#ifndef EIGEN_WARNINGS_DISABLED
+#define EIGEN_WARNINGS_DISABLED
+
+#ifdef _MSC_VER
+ // 4100 - unreferenced formal parameter (occurred e.g. in aligned_allocator::destroy(pointer p))
+ // 4101 - unreferenced local variable
+ // 4127 - conditional expression is constant
+ // 4181 - qualifier applied to reference type ignored
+ // 4211 - nonstandard extension used : redefined extern to static
+ // 4244 - 'argument' : conversion from 'type1' to 'type2', possible loss of data
+ // 4273 - QtAlignedMalloc, inconsistent DLL linkage
+ // 4324 - structure was padded due to declspec(align())
+ // 4512 - assignment operator could not be generated
+ // 4522 - 'class' : multiple assignment operators specified
+ // 4700 - uninitialized local variable 'xyz' used
+ // 4717 - 'function' : recursive on all control paths, function will cause runtime stack overflow
+ #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+ #pragma warning( push )
+ #endif
+ #pragma warning( disable : 4100 4101 4127 4181 4211 4244 4273 4324 4512 4522 4700 4717 )
+#elif defined __INTEL_COMPILER
+ // 2196 - routine is both "inline" and "noinline" ("noinline" assumed)
+ // ICC 12 generates this warning even without any inline keyword, when defining class methods 'inline', i.e., inside the class body.
+ // 279 - controlling expression is constant
+ // ICC 12 generates this warning on assert(constant_expression_depending_on_template_params) and frankly this is a legitimate use case.
+ #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+ #pragma warning push
+ #endif
+ #pragma warning disable 2196 279
+#elif defined __clang__
+ // -Wconstant-logical-operand - warning: use of logical && with constant operand; switch to bitwise & or remove constant
+ // this is really a stupid warning as it warns on compile-time expressions involving enums
+ #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+ #pragma clang diagnostic push
+ #endif
+ #pragma clang diagnostic ignored "-Wconstant-logical-operand"
+#endif
+
+#endif // not EIGEN_WARNINGS_DISABLED
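
This header is meant to be used in a push/pop pair with its counterpart ReenableStupidWarnings.h (see the diffstat above). A sketch of the intended include pattern follows; some_noisy_header.h is a hypothetical header that would otherwise trigger one of the listed warnings:

// push the warning state and silence the warnings listed above
#include "DisableStupidWarnings.h"
#include "some_noisy_header.h"       // hypothetical: e.g. triggers MSVC C4127
// pop the warning state back to what the including code had
#include "ReenableStupidWarnings.h"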
diff --git a/third_party/eigen3/Eigen/src/Core/util/ForwardDeclarations.h b/third_party/eigen3/Eigen/src/Core/util/ForwardDeclarations.h
new file mode 100644
index 0000000000..be39d731ad
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/Core/util/ForwardDeclarations.h
@@ -0,0 +1,301 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_FORWARDDECLARATIONS_H
+#define EIGEN_FORWARDDECLARATIONS_H
+
+namespace Eigen {
+namespace internal {
+
+template<typename T> struct traits;
+
+// here we say once and for all that traits<const T> == traits<T>
+// When constness must affect traits, it has to be constness on template parameters on which T itself depends.
+// For example, traits<Map<const T> > != traits<Map<T> >, but
+// traits<const Map<T> > == traits<Map<T> >
+template<typename T> struct traits<const T> : traits<T> {};
+
+template<typename Derived> struct has_direct_access
+{
+ enum { ret = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0 };
+};
+
+template<typename Derived> struct accessors_level
+{
+ enum { has_direct_access = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0,
+ has_write_access = (traits<Derived>::Flags & LvalueBit) ? 1 : 0,
+ value = has_direct_access ? (has_write_access ? DirectWriteAccessors : DirectAccessors)
+ : (has_write_access ? WriteAccessors : ReadOnlyAccessors)
+ };
+};
+
+} // end namespace internal
+
+template<typename T> struct NumTraits;
+
+template<typename Derived> struct EigenBase;
+template<typename Derived> class DenseBase;
+template<typename Derived> class PlainObjectBase;
+
+
+template<typename Derived,
+ int Level = internal::accessors_level<Derived>::value >
+class DenseCoeffsBase;
+
+template<typename _Scalar, int _Rows, int _Cols,
+ int _Options = AutoAlign |
+#if EIGEN_GNUC_AT(3,4)
+ // workaround a bug in at least gcc 3.4.6
+ // the innermost ?: ternary operator is misparsed. We write it slightly
+ // differently and this makes gcc 3.4.6 happy, but it's ugly.
+ // The error would only show up when EIGEN_DEFAULT_TO_ROW_MAJOR is defined
+ // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
+ ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
+ : !(_Cols==1 && _Rows!=1) ? EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
+ : Eigen::ColMajor ),
+#else
+ ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
+ : (_Cols==1 && _Rows!=1) ? Eigen::ColMajor
+ : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
+#endif
+ int _MaxRows = _Rows,
+ int _MaxCols = _Cols
+> class Matrix;
+
+template<typename Derived> class MatrixBase;
+template<typename Derived> class ArrayBase;
+
+template<typename ExpressionType, unsigned int Added, unsigned int Removed> class Flagged;
+template<typename ExpressionType, template <typename> class StorageBase > class NoAlias;
+template<typename ExpressionType> class NestByValue;
+template<typename ExpressionType> class ForceAlignedAccess;
+template<typename ExpressionType> class SwapWrapper;
+
+template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false> class Block;
+
+template<typename MatrixType, int Size=Dynamic> class VectorBlock;
+template<typename MatrixType> class Transpose;
+template<typename MatrixType> class Conjugate;
+template<typename NullaryOp, typename MatrixType> class CwiseNullaryOp;
+template<typename UnaryOp, typename MatrixType> class CwiseUnaryOp;
+template<typename ViewOp, typename MatrixType> class CwiseUnaryView;
+template<typename BinaryOp, typename Lhs, typename Rhs> class CwiseBinaryOp;
+template<typename BinOp, typename Lhs, typename Rhs> class SelfCwiseBinaryOp;
+template<typename Derived, typename Lhs, typename Rhs> class ProductBase;
+template<typename Lhs, typename Rhs> class Product;
+template<typename Lhs, typename Rhs, int Mode> class GeneralProduct;
+template<typename Lhs, typename Rhs, int NestingFlags> class CoeffBasedProduct;
+
+template<typename Derived> class DiagonalBase;
+template<typename _DiagonalVectorType> class DiagonalWrapper;
+template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime=SizeAtCompileTime> class DiagonalMatrix;
+template<typename MatrixType, typename DiagonalType, int ProductOrder> class DiagonalProduct;
+template<typename MatrixType, int Index = 0> class Diagonal;
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class PermutationMatrix;
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class Transpositions;
+template<typename Derived> class PermutationBase;
+template<typename Derived> class TranspositionsBase;
+template<typename _IndicesType> class PermutationWrapper;
+template<typename _IndicesType> class TranspositionsWrapper;
+
+template<typename Derived,
+ int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors
+> class MapBase;
+template<int InnerStrideAtCompileTime, int OuterStrideAtCompileTime> class Stride;
+template<typename MatrixType, int MapOptions=Unaligned, typename StrideType = Stride<0,0> > class Map;
+
+template<typename Derived> class TriangularBase;
+template<typename MatrixType, unsigned int Mode> class TriangularView;
+template<typename MatrixType, unsigned int Mode> class SelfAdjointView;
+template<typename MatrixType> class SparseView;
+template<typename ExpressionType> class WithFormat;
+template<typename MatrixType> struct CommaInitializer;
+template<typename Derived> class ReturnByValue;
+template<typename ExpressionType> class ArrayWrapper;
+template<typename ExpressionType> class MatrixWrapper;
+
+namespace internal {
+template<typename DecompositionType, typename Rhs> struct solve_retval_base;
+template<typename DecompositionType, typename Rhs> struct solve_retval;
+template<typename DecompositionType> struct kernel_retval_base;
+template<typename DecompositionType> struct kernel_retval;
+template<typename DecompositionType> struct image_retval_base;
+template<typename DecompositionType> struct image_retval;
+} // end namespace internal
+
+namespace internal {
+template<typename _Scalar, int Rows=Dynamic, int Cols=Dynamic, int Supers=Dynamic, int Subs=Dynamic, int Options=0> class BandMatrix;
+}
+
+namespace internal {
+template<typename Lhs, typename Rhs> struct product_type;
+}
+
+template<typename Lhs, typename Rhs,
+ int ProductType = internal::product_type<Lhs,Rhs>::value>
+struct ProductReturnType;
+
+// this is a workaround for sun CC
+template<typename Lhs, typename Rhs> struct LazyProductReturnType;
+
+namespace internal {
+
+// Provides scalar/packet-wise product and product with accumulation
+// with optional conjugation of the arguments.
+template<typename LhsScalar, typename RhsScalar, bool ConjLhs=false, bool ConjRhs=false> struct conj_helper;
+
+template<typename Scalar> struct scalar_sum_op;
+template<typename Scalar> struct scalar_difference_op;
+template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op;
+template<typename Scalar> struct scalar_opposite_op;
+template<typename Scalar> struct scalar_conjugate_op;
+template<typename Scalar> struct scalar_real_op;
+template<typename Scalar> struct scalar_imag_op;
+template<typename Scalar> struct scalar_abs_op;
+template<typename Scalar> struct scalar_abs2_op;
+template<typename Scalar> struct scalar_sqrt_op;
+template<typename Scalar> struct scalar_rsqrt_op;
+template<typename Scalar> struct scalar_exp_op;
+template<typename Scalar> struct scalar_log_op;
+template<typename Scalar> struct scalar_cos_op;
+template<typename Scalar> struct scalar_sin_op;
+template<typename Scalar> struct scalar_acos_op;
+template<typename Scalar> struct scalar_asin_op;
+template<typename Scalar> struct scalar_tan_op;
+template<typename Scalar> struct scalar_pow_op;
+template<typename Scalar> struct scalar_inverse_op;
+template<typename Scalar> struct scalar_square_op;
+template<typename Scalar> struct scalar_cube_op;
+template<typename Scalar, typename NewType> struct scalar_cast_op;
+template<typename Scalar> struct scalar_multiple_op;
+template<typename Scalar> struct scalar_quotient1_op;
+template<typename Scalar> struct scalar_min_op;
+template<typename Scalar> struct scalar_max_op;
+template<typename Scalar> struct scalar_random_op;
+template<typename Scalar> struct scalar_add_op;
+template<typename Scalar> struct scalar_constant_op;
+template<typename Scalar> struct scalar_identity_op;
+
+template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_product_op;
+template<typename LhsScalar,typename RhsScalar> struct scalar_multiple2_op;
+template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_quotient_op;
+
+} // end namespace internal
+
+struct IOFormat;
+
+// Array module
+template<typename _Scalar, int _Rows, int _Cols,
+ int _Options = AutoAlign |
+#if EIGEN_GNUC_AT(3,4)
+ // workaround a bug in at least gcc 3.4.6
+ // the innermost ?: ternary operator is misparsed. We write it slightly
+ // differently and this makes gcc 3.4.6 happy, but it's ugly.
+ // The error would only show up when EIGEN_DEFAULT_TO_ROW_MAJOR is defined
+ // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
+ ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
+ : !(_Cols==1 && _Rows!=1) ? EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
+ : Eigen::ColMajor ),
+#else
+ ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
+ : (_Cols==1 && _Rows!=1) ? Eigen::ColMajor
+ : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
+#endif
+ int _MaxRows = _Rows, int _MaxCols = _Cols> class Array;
+template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType> class Select;
+template<typename MatrixType, typename BinaryOp, int Direction> class PartialReduxExpr;
+template<typename ExpressionType, int Direction> class VectorwiseOp;
+template<typename MatrixType,int RowFactor,int ColFactor> class Replicate;
+template<typename MatrixType, int Direction = BothDirections> class Reverse;
+
+template<typename MatrixType> class FullPivLU;
+template<typename MatrixType> class PartialPivLU;
+namespace internal {
+template<typename MatrixType> struct inverse_impl;
+}
+template<typename MatrixType> class HouseholderQR;
+template<typename MatrixType> class ColPivHouseholderQR;
+template<typename MatrixType> class FullPivHouseholderQR;
+template<typename MatrixType, int QRPreconditioner = ColPivHouseholderQRPreconditioner> class JacobiSVD;
+template<typename MatrixType, int UpLo = Lower> class LLT;
+template<typename MatrixType, int UpLo = Lower> class LDLT;
+template<typename VectorsType, typename CoeffsType, int Side=OnTheLeft> class HouseholderSequence;
+template<typename Scalar> class JacobiRotation;
+
+// Geometry module:
+template<typename Derived, int _Dim> class RotationBase;
+template<typename Lhs, typename Rhs> class Cross;
+template<typename Derived> class QuaternionBase;
+template<typename Scalar> class Rotation2D;
+template<typename Scalar> class AngleAxis;
+template<typename Scalar,int Dim> class Translation;
+
+#ifdef EIGEN2_SUPPORT
+template<typename Derived, int _Dim> class eigen2_RotationBase;
+template<typename Lhs, typename Rhs> class eigen2_Cross;
+template<typename Scalar> class eigen2_Quaternion;
+template<typename Scalar> class eigen2_Rotation2D;
+template<typename Scalar> class eigen2_AngleAxis;
+template<typename Scalar,int Dim> class eigen2_Transform;
+template <typename _Scalar, int _AmbientDim> class eigen2_ParametrizedLine;
+template <typename _Scalar, int _AmbientDim> class eigen2_Hyperplane;
+template<typename Scalar,int Dim> class eigen2_Translation;
+template<typename Scalar,int Dim> class eigen2_Scaling;
+#endif
+
+#if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS
+template<typename Scalar> class Quaternion;
+template<typename Scalar,int Dim> class Transform;
+template <typename _Scalar, int _AmbientDim> class ParametrizedLine;
+template <typename _Scalar, int _AmbientDim> class Hyperplane;
+template<typename Scalar,int Dim> class Scaling;
+#endif
+
+#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+template<typename Scalar, int Options = AutoAlign> class Quaternion;
+template<typename Scalar,int Dim,int Mode,int _Options=AutoAlign> class Transform;
+template <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class ParametrizedLine;
+template <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class Hyperplane;
+template<typename Scalar> class UniformScaling;
+template<typename MatrixType,int Direction> class Homogeneous;
+#endif
+
+// MatrixFunctions module
+template<typename Derived> struct MatrixExponentialReturnValue;
+template<typename Derived> class MatrixFunctionReturnValue;
+template<typename Derived> class MatrixSquareRootReturnValue;
+template<typename Derived> class MatrixLogarithmReturnValue;
+template<typename Derived> class MatrixPowerReturnValue;
+template<typename Derived> class MatrixComplexPowerReturnValue;
+
+namespace internal {
+template <typename Scalar>
+struct stem_function
+{
+ typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+ typedef ComplexScalar type(ComplexScalar, int);
+};
+}
+
+
+#ifdef EIGEN2_SUPPORT
+template<typename ExpressionType> class Cwise;
+template<typename MatrixType> class Minor;
+template<typename MatrixType> class LU;
+template<typename MatrixType> class QR;
+template<typename MatrixType> class SVD;
+namespace internal {
+template<typename MatrixType, unsigned int Mode> struct eigen2_part_return_type;
+}
+#endif
+
+} // end namespace Eigen
+
+#endif // EIGEN_FORWARDDECLARATIONS_H
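
One consequence of the traits<const T> == traits<T> rule declared at the top of this file can be checked directly; a minimal sketch, assuming a C++11 compiler for static_assert:

#include <Eigen/Dense>
#include <type_traits>

int main() {
  using Eigen::internal::traits;
  // constness on T itself does not change its traits, as promised above
  static_assert(std::is_same<traits<const Eigen::Matrix3f>::Scalar,
                             traits<Eigen::Matrix3f>::Scalar>::value,
                "traits<const T> must equal traits<T>");
  return 0;
}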
diff --git a/third_party/eigen3/Eigen/src/Core/util/MKL_support.h b/third_party/eigen3/Eigen/src/Core/util/MKL_support.h
new file mode 100644
index 0000000000..8acca9c8c5
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/Core/util/MKL_support.h
@@ -0,0 +1,126 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Include file with common MKL declarations
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_MKL_SUPPORT_H
+#define EIGEN_MKL_SUPPORT_H
+
+#ifdef EIGEN_USE_MKL_ALL
+ #ifndef EIGEN_USE_BLAS
+ #define EIGEN_USE_BLAS
+ #endif
+ #ifndef EIGEN_USE_LAPACKE
+ #define EIGEN_USE_LAPACKE
+ #endif
+ #ifndef EIGEN_USE_MKL_VML
+ #define EIGEN_USE_MKL_VML
+ #endif
+#endif
+
+#ifdef EIGEN_USE_LAPACKE_STRICT
+ #define EIGEN_USE_LAPACKE
+#endif
+
+#if defined(EIGEN_USE_BLAS) || defined(EIGEN_USE_LAPACKE) || defined(EIGEN_USE_MKL_VML)
+ #define EIGEN_USE_MKL
+#endif
+
+#if defined EIGEN_USE_MKL
+# include <mkl.h>
+/*Check IMKL version for compatibility: < 10.3 is not usable with Eigen*/
+# ifndef INTEL_MKL_VERSION
+# undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */
+# elif INTEL_MKL_VERSION < 100305 /* the intel-mkl-103-release-notes say this was when the lapacke.h interface was added*/
+# undef EIGEN_USE_MKL
+# endif
+# ifndef EIGEN_USE_MKL
+ /*If the MKL version is too old, undef everything*/
+# undef EIGEN_USE_MKL_ALL
+# undef EIGEN_USE_BLAS
+# undef EIGEN_USE_LAPACKE
+# undef EIGEN_USE_MKL_VML
+# undef EIGEN_USE_LAPACKE_STRICT
+# undef EIGEN_USE_LAPACKE
+# endif
+#endif
+
+#if defined EIGEN_USE_MKL
+#include <mkl_lapacke.h>
+#define EIGEN_MKL_VML_THRESHOLD 128
+
+namespace Eigen {
+
+typedef std::complex<double> dcomplex;
+typedef std::complex<float> scomplex;
+
+namespace internal {
+
+template<typename MKLType, typename EigenType>
+static inline void assign_scalar_eig2mkl(MKLType& mklScalar, const EigenType& eigenScalar) {
+ mklScalar=eigenScalar;
+}
+
+template<typename MKLType, typename EigenType>
+static inline void assign_conj_scalar_eig2mkl(MKLType& mklScalar, const EigenType& eigenScalar) {
+ mklScalar=eigenScalar;
+}
+
+template <>
+inline void assign_scalar_eig2mkl<MKL_Complex16,dcomplex>(MKL_Complex16& mklScalar, const dcomplex& eigenScalar) {
+ mklScalar.real=eigenScalar.real();
+ mklScalar.imag=eigenScalar.imag();
+}
+
+template <>
+inline void assign_scalar_eig2mkl<MKL_Complex8,scomplex>(MKL_Complex8& mklScalar, const scomplex& eigenScalar) {
+ mklScalar.real=eigenScalar.real();
+ mklScalar.imag=eigenScalar.imag();
+}
+
+template <>
+inline void assign_conj_scalar_eig2mkl<MKL_Complex16,dcomplex>(MKL_Complex16& mklScalar, const dcomplex& eigenScalar) {
+ mklScalar.real=eigenScalar.real();
+ mklScalar.imag=-eigenScalar.imag();
+}
+
+template <>
+inline void assign_conj_scalar_eig2mkl<MKL_Complex8,scomplex>(MKL_Complex8& mklScalar, const scomplex& eigenScalar) {
+ mklScalar.real=eigenScalar.real();
+ mklScalar.imag=-eigenScalar.imag();
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif
+
+#endif // EIGEN_MKL_SUPPORT_H
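
The four specializations above simply copy (and possibly negate) the real and imaginary parts into MKL's C-style complex structs. A stand-in struct makes that logic testable without MKL installed; FakeComplex16 below is hypothetical and merely mimics the MKL_Complex16 layout:

#include <complex>
#include <iostream>

struct FakeComplex16 { double real; double imag; };  // mimics MKL_Complex16

template<typename MKLType, typename EigenType>
void assign_conj_scalar(MKLType& mklScalar, const EigenType& eigenScalar) {
  mklScalar.real =  eigenScalar.real();
  mklScalar.imag = -eigenScalar.imag();  // conjugate while converting
}

int main() {
  std::complex<double> z(1.0, 2.0);
  FakeComplex16 c;
  assign_conj_scalar(c, z);
  std::cout << c.real << " " << c.imag << "\n";  // prints: 1 -2
  return 0;
}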
diff --git a/third_party/eigen3/Eigen/src/Core/util/Macros.h b/third_party/eigen3/Eigen/src/Core/util/Macros.h
new file mode 100644
index 0000000000..729a451324
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/Core/util/Macros.h
@@ -0,0 +1,740 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MACROS_H
+#define EIGEN_MACROS_H
+
+#define EIGEN_WORLD_VERSION 3
+#define EIGEN_MAJOR_VERSION 2
+#define EIGEN_MINOR_VERSION 90
+
+#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
+ (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
+ EIGEN_MINOR_VERSION>=z))))
+
+// Compiler identification, EIGEN_COMP_*
+/// \internal EIGEN_COMP_GNUC set to 1 for all compilers compatible with GCC
+#ifdef __GNUC__
+ #define EIGEN_COMP_GNUC 1
+#else
+ #define EIGEN_COMP_GNUC 0
+#endif
+
+/// \internal EIGEN_COMP_CLANG set to 1 if the compiler is clang (alias for __clang__)
+#if defined(__clang__)
+ #define EIGEN_COMP_CLANG 1
+#else
+ #define EIGEN_COMP_CLANG 0
+#endif
+
+
+/// \internal EIGEN_COMP_LLVM set to 1 if the compiler backend is llvm
+#if defined(__llvm__)
+ #define EIGEN_COMP_LLVM 1
+#else
+ #define EIGEN_COMP_LLVM 0
+#endif
+
+/// \internal EIGEN_COMP_ICC set to __INTEL_COMPILER if the compiler is Intel compiler, 0 otherwise
+#if defined(__INTEL_COMPILER)
+ #define EIGEN_COMP_ICC __INTEL_COMPILER
+#else
+ #define EIGEN_COMP_ICC 0
+#endif
+
+/// \internal EIGEN_COMP_MINGW set to 1 if the compiler is mingw
+#if defined(__MINGW32__)
+ #define EIGEN_COMP_MINGW 1
+#else
+ #define EIGEN_COMP_MINGW 0
+#endif
+
+/// \internal EIGEN_COMP_SUNCC set to 1 if the compiler is Solaris Studio
+#if defined(__SUNPRO_CC)
+ #define EIGEN_COMP_SUNCC 1
+#else
+ #define EIGEN_COMP_SUNCC 0
+#endif
+
+/// \internal EIGEN_COMP_MSVC set to _MSC_VER if the compiler is Microsoft Visual C++, 0 otherwise.
+#if defined(_MSC_VER)
+ #define EIGEN_COMP_MSVC _MSC_VER
+#else
+ #define EIGEN_COMP_MSVC 0
+#endif
+
+/// \internal EIGEN_COMP_MSVC_STRICT set to 1 if the compiler is really Microsoft Visual C++ and not, e.g., ICC
+#if EIGEN_COMP_MSVC && !(EIGEN_COMP_ICC)
+ #define EIGEN_COMP_MSVC_STRICT 1
+#else
+ #define EIGEN_COMP_MSVC_STRICT 0
+#endif
+
+/// \internal EIGEN_COMP_IBM set to 1 if the compiler is IBM XL C++
+#if defined(__IBMCPP__) || defined(__xlc__)
+ #define EIGEN_COMP_IBM 1
+#else
+ #define EIGEN_COMP_IBM 0
+#endif
+
+/// \internal EIGEN_COMP_PGI set to 1 if the compiler is Portland Group Compiler
+#if defined(__PGI)
+ #define EIGEN_COMP_PGI 1
+#else
+ #define EIGEN_COMP_PGI 0
+#endif
+
+/// \internal EIGEN_COMP_ARM set to 1 if the compiler is ARM Compiler
+#if defined(__CC_ARM) || defined(__ARMCC_VERSION)
+ #define EIGEN_COMP_ARM 1
+#else
+ #define EIGEN_COMP_ARM 0
+#endif
+
+
+/// \internal EIGEN_COMP_GNUC_STRICT set to 1 if the compiler is really GCC and not a compatible compiler (e.g., ICC, clang, mingw, etc.)
+#if EIGEN_COMP_GNUC && !(EIGEN_COMP_CLANG || EIGEN_COMP_ICC || EIGEN_COMP_MINGW || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM )
+ #define EIGEN_COMP_GNUC_STRICT 1
+#else
+ #define EIGEN_COMP_GNUC_STRICT 0
+#endif
+
+
+#if EIGEN_COMP_GNUC
+ #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x)
+ #define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__<x)
+ #define EIGEN_GNUC_AT(x,y) ( __GNUC__==x && __GNUC_MINOR__==y )
+#else
+ #define EIGEN_GNUC_AT_LEAST(x,y) 0
+ #define EIGEN_GNUC_AT_MOST(x,y) 0
+ #define EIGEN_GNUC_AT(x,y) 0
+#endif
+
+// FIXME: could probably be removed as we do not support gcc 3.x anymore
+#if EIGEN_COMP_GNUC && (__GNUC__ <= 3)
+#define EIGEN_GCC3_OR_OLDER 1
+#else
+#define EIGEN_GCC3_OR_OLDER 0
+#endif
+
+
+// Architecture identification, EIGEN_ARCH_*
+
+#if defined(__x86_64__) || defined(_M_X64) || defined(__amd64)
+ #define EIGEN_ARCH_x86_64 1
+#else
+ #define EIGEN_ARCH_x86_64 0
+#endif
+
+#if defined(__i386__) || defined(_M_IX86) || defined(_X86_) || defined(__i386)
+ #define EIGEN_ARCH_i386 1
+#else
+ #define EIGEN_ARCH_i386 0
+#endif
+
+#if EIGEN_ARCH_x86_64 || EIGEN_ARCH_i386
+ #define EIGEN_ARCH_i386_OR_x86_64 1
+#else
+ #define EIGEN_ARCH_i386_OR_x86_64 0
+#endif
+
+/// \internal EIGEN_ARCH_ARM set to 1 if the architecture is ARM
+#if defined(__arm__)
+ #define EIGEN_ARCH_ARM 1
+#else
+ #define EIGEN_ARCH_ARM 0
+#endif
+
+/// \internal EIGEN_ARCH_ARM64 set to 1 if the architecture is ARM64
+#if defined(__aarch64__)
+ #define EIGEN_ARCH_ARM64 1
+#else
+ #define EIGEN_ARCH_ARM64 0
+#endif
+
+#if EIGEN_ARCH_ARM || EIGEN_ARCH_ARM64
+ #define EIGEN_ARCH_ARM_OR_ARM64 1
+#else
+ #define EIGEN_ARCH_ARM_OR_ARM64 0
+#endif
+
+/// \internal EIGEN_ARCH_MIPS set to 1 if the architecture is MIPS
+#if defined(__mips__) || defined(__mips)
+ #define EIGEN_ARCH_MIPS 1
+#else
+ #define EIGEN_ARCH_MIPS 0
+#endif
+
+/// \internal EIGEN_ARCH_SPARC set to 1 if the architecture is SPARC
+#if defined(__sparc__) || defined(__sparc)
+ #define EIGEN_ARCH_SPARC 1
+#else
+ #define EIGEN_ARCH_SPARC 0
+#endif
+
+/// \internal EIGEN_ARCH_IA64 set to 1 if the architecture is Intel Itanium
+#if defined(__ia64__)
+ #define EIGEN_ARCH_IA64 1
+#else
+ #define EIGEN_ARCH_IA64 0
+#endif
+
+/// \internal EIGEN_ARCH_PPC set to 1 if the architecture is PowerPC
+#if defined(__powerpc__) || defined(__ppc__) || defined(_M_PPC)
+ #define EIGEN_ARCH_PPC 1
+#else
+ #define EIGEN_ARCH_PPC 0
+#endif
+
+
+
+// Operating system identification, EIGEN_OS_*
+
+/// \internal EIGEN_OS_UNIX set to 1 if the OS is a unix variant
+#if defined(__unix__) || defined(__unix)
+ #define EIGEN_OS_UNIX 1
+#else
+ #define EIGEN_OS_UNIX 0
+#endif
+
+/// \internal EIGEN_OS_LINUX set to 1 if the OS is based on Linux kernel
+#if defined(__linux__)
+ #define EIGEN_OS_LINUX 1
+#else
+ #define EIGEN_OS_LINUX 0
+#endif
+
+/// \internal EIGEN_OS_ANDROID set to 1 if the OS is Android
+// note: ANDROID is defined when using ndk_build, __ANDROID__ is defined when using a standalone toolchain.
+#if defined(__ANDROID__) || defined(ANDROID)
+ #define EIGEN_OS_ANDROID 1
+#else
+ #define EIGEN_OS_ANDROID 0
+#endif
+
+/// \internal EIGEN_OS_GNULINUX set to 1 if the OS is GNU/Linux and not another Linux-based OS (e.g., not Android)
+#if defined(__gnu_linux__) && !(EIGEN_OS_ANDROID)
+ #define EIGEN_OS_GNULINUX 1
+#else
+ #define EIGEN_OS_GNULINUX 0
+#endif
+
+/// \internal EIGEN_OS_BSD set to 1 if the OS is a BSD variant
+#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__bsdi__) || defined(__DragonFly__)
+ #define EIGEN_OS_BSD 1
+#else
+ #define EIGEN_OS_BSD 0
+#endif
+
+/// \internal EIGEN_OS_MAC set to 1 if the OS is MacOS
+#if defined(__APPLE__)
+ #define EIGEN_OS_MAC 1
+#else
+ #define EIGEN_OS_MAC 0
+#endif
+
+/// \internal EIGEN_OS_QNX set to 1 if the OS is QNX
+#if defined(__QNX__)
+ #define EIGEN_OS_QNX 1
+#else
+ #define EIGEN_OS_QNX 0
+#endif
+
+/// \internal EIGEN_OS_WIN set to 1 if the OS is Windows based
+#if defined(_WIN32)
+ #define EIGEN_OS_WIN 1
+#else
+ #define EIGEN_OS_WIN 0
+#endif
+
+/// \internal EIGEN_OS_WIN64 set to 1 if the OS is 64-bit Windows
+#if defined(_WIN64)
+ #define EIGEN_OS_WIN64 1
+#else
+ #define EIGEN_OS_WIN64 0
+#endif
+
+/// \internal EIGEN_OS_WINCE set to 1 if the OS is Windows CE
+#if defined(_WIN32_WCE)
+ #define EIGEN_OS_WINCE 1
+#else
+ #define EIGEN_OS_WINCE 0
+#endif
+
+/// \internal EIGEN_OS_CYGWIN set to 1 if the OS is Windows/Cygwin
+#if defined(__CYGWIN__)
+ #define EIGEN_OS_CYGWIN 1
+#else
+ #define EIGEN_OS_CYGWIN 0
+#endif
+
+/// \internal EIGEN_OS_WIN_STRICT set to 1 if the OS is really Windows and not a variant (e.g., not WinCE or Cygwin)
+#if EIGEN_OS_WIN && !( EIGEN_OS_WINCE || EIGEN_OS_CYGWIN )
+ #define EIGEN_OS_WIN_STRICT 1
+#else
+ #define EIGEN_OS_WIN_STRICT 0
+#endif
+
+
+
+
+#if EIGEN_GNUC_AT_MOST(4,3) && !EIGEN_COMP_CLANG
+ // see bug 89
+ #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
+#else
+ #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1
+#endif
+
+// 16-byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
+// 16-byte alignment on all platforms where vectorization might be enabled. In theory we could always
+// enable alignment, but it can be a cause of problems on some platforms, so we just disable it on
+// certain common platform (compiler+architecture) combinations to avoid these problems.
+// Only static alignment is really problematic (it relies on nonstandard compiler extensions that don't
+// work everywhere, for example not on GCC/ARM), so we try to keep heap alignment even
+// when we have to disable static alignment.
+#if EIGEN_COMP_GNUC && !(EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_PPC || EIGEN_ARCH_IA64)
+#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
+#else
+#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
+#endif
+
+// static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX
+#if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \
+ && !EIGEN_GCC3_OR_OLDER \
+ && !EIGEN_COMP_SUNCC \
+ && !EIGEN_OS_QNX
+ #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1
+#else
+ #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0
+#endif
+
+// Defines the boundary (in bytes) on which the data needs to be aligned. Note
+// that unless EIGEN_ALIGN is defined and not equal to 0, the data may not be
+// aligned at all regardless of the value of this #define.
+#define EIGEN_ALIGN_BYTES 16
+
+#ifdef EIGEN_DONT_ALIGN
+ #ifndef EIGEN_DONT_ALIGN_STATICALLY
+ #define EIGEN_DONT_ALIGN_STATICALLY
+ #endif
+ #define EIGEN_ALIGN 0
+#elif !defined(EIGEN_DONT_VECTORIZE)
+ #if defined(__AVX__)
+ #undef EIGEN_ALIGN_BYTES
+ #define EIGEN_ALIGN_BYTES 32
+ #endif
+ #define EIGEN_ALIGN 1
+#else
+ #define EIGEN_ALIGN 0
+#endif
+
+#define EIGEN_MAX_ALIGN_BYTES EIGEN_ALIGN_BYTES
+
+
+// This macro can be used to prevent macro expansion, e.g.:
+// std::max EIGEN_NOT_A_MACRO(a,b)
+#define EIGEN_NOT_A_MACRO
+
+// EIGEN_ALIGN_STATICALLY is the true test whether we want to align arrays on the stack or not. It takes into account both the user choice to explicitly disable
+// alignment (EIGEN_DONT_ALIGN_STATICALLY) and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT). Henceforth, only EIGEN_ALIGN_STATICALLY should be used.
+#if EIGEN_ARCH_WANTS_STACK_ALIGNMENT && !defined(EIGEN_DONT_ALIGN_STATICALLY)
+ #define EIGEN_ALIGN_STATICALLY 1
+#else
+ #define EIGEN_ALIGN_STATICALLY 0
+ #ifndef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+ #define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+ #endif
+#endif
+
+#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
+#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::RowMajor
+#else
+#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::ColMajor
+#endif
+
+#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
+#endif
+
+// Cross-compiler wrapper around LLVM's __has_builtin
+#ifdef __has_builtin
+# define EIGEN_HAS_BUILTIN(x) __has_builtin(x)
+#else
+# define EIGEN_HAS_BUILTIN(x) 0
+#endif
+
+// A Clang feature extension to determine compiler features.
+// We use it to determine 'cxx_rvalue_references'
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+#if __cplusplus > 199711L
+#define EIGEN_HAS_VARIADIC_TEMPLATES 1
+#endif
+
+// Does the compiler support constant expressions (constexpr)?
+#if __cplusplus > 199711L && !defined(__NVCC__) && !defined(GOOGLE_LIBCXX) && !defined(__APPLE__)
+#define EIGEN_HAS_CONSTEXPR 1
+#endif
+
+/** Allows disabling some optimizations which might affect the accuracy of the result.
+  * Such optimizations are enabled by default; set EIGEN_FAST_MATH to 0 to disable them.
+ * They currently include:
+ * - single precision Cwise::sin() and Cwise::cos() when SSE vectorization is enabled.
+ */
+#ifndef EIGEN_FAST_MATH
+#define EIGEN_FAST_MATH 1
+#endif
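+// For instance, a build that needs bit-exact trigonometry can opt out before
+// including any Eigen header (illustrative usage, not from the original sources):
+// \code
+// #define EIGEN_FAST_MATH 0
+// #include <Eigen/Dense>
+// \endcode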
+
+#define EIGEN_DEBUG_VAR(x) std::cerr << #x << " = " << x << std::endl;
+
+// concatenate two tokens
+#define EIGEN_CAT2(a,b) a ## b
+#define EIGEN_CAT(a,b) EIGEN_CAT2(a,b)
+
+// convert a token to a string
+#define EIGEN_MAKESTRING2(a) #a
+#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a)
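+// For example, EIGEN_CAT(Matrix,4f) expands to the single token Matrix4f, and
+// EIGEN_MAKESTRING(EIGEN_ALIGN_BYTES) yields "16" (or "32" with AVX): the extra
+// level of indirection lets the argument expand before pasting/stringizing.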
+
+// EIGEN_STRONG_INLINE is a stronger version of the inline keyword, using __forceinline on MSVC,
+// but it still doesn't use GCC's always_inline. This is useful in (common) situations where MSVC needs forceinline
+// but GCC is still doing fine with just inline.
+#if EIGEN_COMP_MSVC || EIGEN_COMP_ICC
+#define EIGEN_STRONG_INLINE __forceinline
+#else
+#define EIGEN_STRONG_INLINE inline
+#endif
+
+// EIGEN_ALWAYS_INLINE is the strongest: it makes the function inline and adds every possible
+// attribute to maximize inlining. This should only be used when really necessary: in particular,
+// it uses __attribute__((always_inline)) on GCC, which most of the time is useless and can severely harm compile times.
+// FIXME with the always_inline attribute,
+// gcc 3.4.x reports the following compilation error:
+// Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
+// : function body not available
+#if EIGEN_GNUC_AT_LEAST(4,0)
+#define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline
+#else
+#define EIGEN_ALWAYS_INLINE EIGEN_STRONG_INLINE
+#endif
+
+#if EIGEN_COMP_GNUC
+#define EIGEN_DONT_INLINE __attribute__((noinline))
+#elif EIGEN_COMP_MSVC
+#define EIGEN_DONT_INLINE __declspec(noinline)
+#else
+#define EIGEN_DONT_INLINE
+#endif
+
+#if EIGEN_COMP_GNUC
+#define EIGEN_PERMISSIVE_EXPR __extension__
+#else
+#define EIGEN_PERMISSIVE_EXPR
+#endif
+
+#if EIGEN_COMP_GNUC
+#define EIGEN_LIKELY(x) __builtin_expect((x), 1)
+#define EIGEN_UNLIKELY(x) __builtin_expect((x), 0)
+#else
+#define EIGEN_LIKELY(x) (x)
+#define EIGEN_UNLIKELY(x) (x)
+#endif
+
+// these macros allow us to get rid of linking errors about multiply defined functions.
+// - static is not very good because it prevents definitions from different object files from being merged,
+//   so static causes the resulting linked executable to be bloated with multiple copies of the same function.
+// - inline is not perfect either, as it unwantedly hints the compiler toward inlining the function.
+#define EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+#define EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS inline
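+// For illustration, a free function defined in a header would then read as
+// follows (sketch only; my_helper is a made-up name):
+// \code
+// EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+// float my_helper() { return 0.5f; } // expands to: inline float my_helper() ...
+// \endcode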
+
+#ifdef NDEBUG
+# ifndef EIGEN_NO_DEBUG
+# define EIGEN_NO_DEBUG
+# endif
+#endif
+
+#if !defined(EIGEN_NO_CHECK) || (!defined(EIGEN_NO_DEBUG) && !EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO)
+ // Custom assertion code that works regardless of the compilation mode.
+ #include <cstdlib> // for abort
+ #include <iostream> // for std::cerr
+
+ namespace Eigen {
+ namespace internal {
+ // trivial function copying a bool. Must be EIGEN_DONT_INLINE, so we implement it after including Eigen headers.
+ // see bug 89.
+ namespace {
+ EIGEN_DONT_INLINE bool copy_bool(bool b) { return b; }
+ }
+ inline void assert_fail(const char *condition, const char *function, const char *file, int line)
+ {
+ copy_bool(true); // dummy call to avoid warnings about unused functions.
+ std::cerr << "assertion failed: " << condition << " in function " << function << " at " << file << ":" << line << std::endl;
+ abort();
+ }
+ }
+ }
+ #define eigen_internal_check(x) \
+ do { \
+ if(!Eigen::internal::copy_bool(x)) \
+ Eigen::internal::assert_fail(EIGEN_MAKESTRING(x), __PRETTY_FUNCTION__, __FILE__, __LINE__); \
+ } while(false)
+#endif
+
+#ifdef EIGEN_NO_CHECK
+ #define eigen_check(x)
+#else
+ #define eigen_check(x) eigen_internal_check(x)
+#endif
+
+// eigen_plain_assert is where we implement the workaround for the assert() bug in GCC <= 4.3, see bug 89
+#ifdef EIGEN_NO_DEBUG
+ #define eigen_plain_assert(x)
+#else
+ #if EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO
+ namespace Eigen {
+ namespace internal {
+ inline bool copy_bool(bool b) { return b; }
+ }
+ }
+ #define eigen_plain_assert(x) assert(x)
+ #else
+ // work around bug 89
+ #define eigen_plain_assert(x) eigen_internal_check(x)
+ #endif
+#endif
+
+// eigen_assert can be overridden
+#ifndef eigen_assert
+#define eigen_assert(x) eigen_plain_assert(x)
+#endif
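+// Since eigen_assert is only defined when not already set, a client can install
+// its own handler before including any Eigen header, e.g. to throw instead of
+// aborting (illustrative sketch):
+// \code
+// #include <stdexcept>
+// #define eigen_assert(x) do { if(!(x)) throw std::runtime_error(#x); } while(false)
+// #include <Eigen/Dense>
+// \endcode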
+
+#ifdef EIGEN_INTERNAL_DEBUGGING
+#define eigen_internal_assert(x) eigen_assert(x)
+#else
+#define eigen_internal_assert(x)
+#endif
+
+#ifdef EIGEN_NO_DEBUG
+#define EIGEN_ONLY_USED_FOR_DEBUG(x) (void)x
+#else
+#define EIGEN_ONLY_USED_FOR_DEBUG(x)
+#endif
+
+#ifndef EIGEN_NO_DEPRECATED_WARNING
+ #if EIGEN_COMP_GNUC
+ #define EIGEN_DEPRECATED __attribute__((deprecated))
+ #elif (defined _MSC_VER)
+ #define EIGEN_DEPRECATED __declspec(deprecated)
+ #else
+ #define EIGEN_DEPRECATED
+ #endif
+#else
+ #define EIGEN_DEPRECATED
+#endif
+
+#if EIGEN_COMP_GNUC
+#define EIGEN_UNUSED __attribute__((unused))
+#else
+#define EIGEN_UNUSED
+#endif
+
+// Suppresses 'unused variable' warnings.
+namespace Eigen {
+ namespace internal {
+ template<typename T> void ignore_unused_variable(const T&) {}
+ }
+}
+#define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var);
+
+#if !defined(EIGEN_ASM_COMMENT)
+ #if EIGEN_COMP_GNUC && (EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64)
+ #define EIGEN_ASM_COMMENT(X) asm("#" X)
+ #else
+ #define EIGEN_ASM_COMMENT(X)
+ #endif
+#endif
+
+/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
+ * However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled,
+ * so that vectorization doesn't affect binary compatibility.
+ *
+ * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
+ * vectorized and non-vectorized code.
+ */
+#if (defined __CUDACC__)
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __align__(n)
+#elif EIGEN_COMP_GNUC || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
+#elif EIGEN_COMP_MSVC
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
+#elif EIGEN_COMP_SUNCC
+ // FIXME not sure about this one:
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
+#else
+ #error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
+#endif
+
+#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
+#define EIGEN_ALIGN32 EIGEN_ALIGN_TO_BOUNDARY(32)
+#define EIGEN_ALIGN_DEFAULT EIGEN_ALIGN_TO_BOUNDARY(EIGEN_ALIGN_BYTES)
+#define EIGEN_ALIGN_MAX EIGEN_ALIGN_DEFAULT
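+// e.g. "EIGEN_ALIGN16 float coeffs[4];" declares a 16-byte-aligned array,
+// whatever compiler-specific attribute the macro expands to.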
+
+#if EIGEN_ALIGN_STATICALLY
+#define EIGEN_USER_ALIGN_TO_BOUNDARY(n) EIGEN_ALIGN_TO_BOUNDARY(n)
+#define EIGEN_USER_ALIGN16 EIGEN_ALIGN16
+#define EIGEN_USER_ALIGN32 EIGEN_ALIGN32
+#define EIGEN_USER_ALIGN_DEFAULT EIGEN_ALIGN_DEFAULT
+#else
+#define EIGEN_USER_ALIGN_TO_BOUNDARY(n)
+#define EIGEN_USER_ALIGN16
+#define EIGEN_USER_ALIGN32
+#define EIGEN_USER_ALIGN_DEFAULT
+#endif
+
+#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD
+ #define EIGEN_RESTRICT
+#endif
+#ifndef EIGEN_RESTRICT
+ #define EIGEN_RESTRICT __restrict
+#endif
+
+#ifndef EIGEN_STACK_ALLOCATION_LIMIT
+#define EIGEN_STACK_ALLOCATION_LIMIT 20000
+#endif
+
+#ifndef EIGEN_DEFAULT_IO_FORMAT
+#ifdef EIGEN_MAKING_DOCS
+// format used in Eigen's documentation
+// it needs to be defined here because escaping characters in CMake's add_definitions() argument seems very problematic.
+#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat(3, 0, " ", "\n", "", "")
+#else
+#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat()
+#endif
+#endif
+
+// just an empty macro!
+#define EIGEN_EMPTY
+
+#if EIGEN_COMP_MSVC_STRICT
+ #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
+ using Base::operator =;
+#elif EIGEN_COMP_CLANG // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
+ #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
+ using Base::operator =; \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \
+ template <typename OtherDerived> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other) { Base::operator=(other.derived()); return *this; }
+#else
+ #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
+ using Base::operator =; \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) \
+ { \
+ Base::operator=(other); \
+ return *this; \
+ }
+#endif
+
+#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
+
+/**
+* Just a side note: commenting within defines works only by documenting
+* behind the object (via '!<'). Comments cannot be multi-line, and thus
+* we have these extra-long lines. What confuses doxygen here is
+* that we use '\' line continuations and basically have a bunch of typedefs with their
+* documentation on a single line.
+**/
+
+#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
+ typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
+  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T corresponds to RealScalar. */ \
+ typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
+ typedef typename Eigen::internal::nested<Derived>::type Nested; \
+ typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
+ typedef typename Eigen::internal::traits<Derived>::Index Index; \
+ enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
+ ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
+ Flags = Eigen::internal::traits<Derived>::Flags, \
+ CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
+ SizeAtCompileTime = Base::SizeAtCompileTime, \
+ MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
+ IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
+
+
+#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
+ typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
+  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T corresponds to RealScalar. */ \
+ typedef typename Base::PacketScalar PacketScalar; \
+ typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
+ typedef typename Eigen::internal::nested<Derived>::type Nested; \
+ typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
+ typedef typename Eigen::internal::traits<Derived>::Index Index; \
+ enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
+ ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
+ MaxRowsAtCompileTime = Eigen::internal::traits<Derived>::MaxRowsAtCompileTime, \
+ MaxColsAtCompileTime = Eigen::internal::traits<Derived>::MaxColsAtCompileTime, \
+ Flags = Eigen::internal::traits<Derived>::Flags, \
+ CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
+ SizeAtCompileTime = Base::SizeAtCompileTime, \
+ MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
+ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
+ using Base::derived; \
+ using Base::const_cast_derived;
+
+
+#define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)
+#define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b)
+
+// EIGEN_SIZE_MIN_PREFER_DYNAMIC gives the min between compile-time sizes. 0 has absolute priority, followed by 1,
+// followed by Dynamic, followed by other finite values. The reason for giving Dynamic the priority over
+// finite values is that min(3, Dynamic) should be Dynamic, since that could be anything between 0 and 3.
+#define EIGEN_SIZE_MIN_PREFER_DYNAMIC(a,b) (((int)a == 0 || (int)b == 0) ? 0 \
+ : ((int)a == 1 || (int)b == 1) ? 1 \
+ : ((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
+ : ((int)a <= (int)b) ? (int)a : (int)b)
+
+// EIGEN_SIZE_MIN_PREFER_FIXED is a variant of EIGEN_SIZE_MIN_PREFER_DYNAMIC comparing MaxSizes. The difference is that finite values
+// now have priority over Dynamic, so that min(3, Dynamic) gives 3. Indeed, whatever the actual value is
+// (between 0 and 3), it is not more than 3.
+#define EIGEN_SIZE_MIN_PREFER_FIXED(a,b) (((int)a == 0 || (int)b == 0) ? 0 \
+ : ((int)a == 1 || (int)b == 1) ? 1 \
+ : ((int)a == Dynamic && (int)b == Dynamic) ? Dynamic \
+ : ((int)a == Dynamic) ? (int)b \
+ : ((int)b == Dynamic) ? (int)a \
+ : ((int)a <= (int)b) ? (int)a : (int)b)
+
+// see EIGEN_SIZE_MIN_PREFER_DYNAMIC. No need for a separate variant for MaxSizes here.
+#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
+ : ((int)a >= (int)b) ? (int)a : (int)b)
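+// A few worked instances of the rules above:
+//   EIGEN_SIZE_MIN_PREFER_DYNAMIC(3, Dynamic) == Dynamic
+//   EIGEN_SIZE_MIN_PREFER_FIXED(3, Dynamic)   == 3
+//   EIGEN_SIZE_MIN_PREFER_DYNAMIC(0, Dynamic) == 0   // 0 always wins
+//   EIGEN_SIZE_MAX(3, Dynamic)                == Dynamic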
+
+#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b)))
+
+#define EIGEN_IMPLIES(a,b) (!(a) || (b))
+
+#define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR) \
+ template<typename OtherDerived> \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> \
+ (METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+ { \
+ return CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); \
+ }
+
+// the expression type of a cwise product
+#define EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS) \
+ CwiseBinaryOp< \
+ internal::scalar_product_op< \
+ typename internal::traits<LHS>::Scalar, \
+ typename internal::traits<RHS>::Scalar \
+ >, \
+ const LHS, \
+ const RHS \
+ >
+
+#endif // EIGEN_MACROS_H
diff --git a/third_party/eigen3/Eigen/src/Core/util/MatrixMapper.h b/third_party/eigen3/Eigen/src/Core/util/MatrixMapper.h
new file mode 100644
index 0000000000..ec2ad018ff
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/Core/util/MatrixMapper.h
@@ -0,0 +1,155 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Eric Martin <eric@ericmart.in>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIXMAPPER_H
+#define EIGEN_MATRIXMAPPER_H
+
+// To support both matrices and tensors, we need a way to abstractly access an
+// element of a matrix (where the matrix might be an implicitly flattened
+// tensor). This file abstracts the logic needed to access elements in a row
+// major or column major matrix.
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Scalar, typename Index>
+class BlasVectorMapper {
+ public:
+ EIGEN_ALWAYS_INLINE BlasVectorMapper(Scalar *data) : m_data(data) {}
+
+ EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const {
+ return m_data[i];
+ }
+ template <typename Packet, int AlignmentType>
+ EIGEN_ALWAYS_INLINE Packet load(Index i) const {
+ return ploadt<Packet, AlignmentType>(m_data + i);
+ }
+
+ template <typename Packet>
+ bool aligned(Index i) const {
+ return (size_t(m_data+i)%sizeof(Packet))==0;
+ }
+
+ protected:
+ Scalar* m_data;
+};
+
+// We need a fast way to iterate down columns (if column major) that doesn't
+// involve performing a multiplication for each lookup.
+template<typename Scalar, typename Index, int AlignmentType>
+class BlasLinearMapper {
+ public:
+ typedef typename packet_traits<Scalar>::type Packet;
+ typedef typename packet_traits<Scalar>::half HalfPacket;
+
+ EIGEN_ALWAYS_INLINE BlasLinearMapper(Scalar *data) : m_data(data) {}
+
+ EIGEN_ALWAYS_INLINE void prefetch(int i) const {
+ internal::prefetch(&operator()(i));
+ }
+
+ EIGEN_ALWAYS_INLINE Scalar& operator()(Index i) const {
+ return m_data[i];
+ }
+
+ EIGEN_ALWAYS_INLINE Packet loadPacket(Index i) const {
+ return ploadt<Packet, AlignmentType>(m_data + i);
+ }
+
+ EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i) const {
+ return ploadt<HalfPacket, AlignmentType>(m_data + i);
+ }
+
+ EIGEN_ALWAYS_INLINE void storePacket(Index i, Packet p) const {
+ pstoret<Scalar, Packet, AlignmentType>(m_data + i, p);
+ }
+
+ protected:
+ Scalar* m_data;
+};
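+// Illustrative sketch (not part of the original sources): to walk down column j
+// of a column-major matrix, build a linear mapper once and index it with plain
+// offsets; data, i, j and stride are made-up names.
+// \code
+// BlasLinearMapper<float, int, Unaligned> col(data + j*stride);
+// float v = col(i); // reads data[i + j*stride] with no multiply per access
+// \endcode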
+
+// This mapper allows access into a matrix by coordinates i and j.
+template<typename Scalar, typename Index, int StorageOrder, int AlignmentType = Unaligned>
+class blas_data_mapper {
+ public:
+ typedef typename packet_traits<Scalar>::type Packet;
+ typedef typename packet_traits<Scalar>::half HalfPacket;
+
+ typedef BlasLinearMapper<Scalar, Index, AlignmentType> LinearMapper;
+ typedef BlasVectorMapper<Scalar, Index> VectorMapper;
+
+ EIGEN_ALWAYS_INLINE blas_data_mapper(Scalar* data, Index stride) : m_data(data), m_stride(stride) {}
+
+ EIGEN_ALWAYS_INLINE blas_data_mapper<Scalar, Index, StorageOrder, AlignmentType>
+ getSubMapper(Index i, Index j) const {
+ return blas_data_mapper<Scalar, Index, StorageOrder, AlignmentType>(&operator()(i, j), m_stride);
+ }
+
+ EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const {
+ return LinearMapper(&operator()(i, j));
+ }
+
+ EIGEN_ALWAYS_INLINE VectorMapper getVectorMapper(Index i, Index j) const {
+ return VectorMapper(&operator()(i, j));
+ }
+
+ EIGEN_DEVICE_FUNC
+ EIGEN_ALWAYS_INLINE Scalar& operator()(Index i, Index j) const {
+ return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride];
+ }
+
+ EIGEN_ALWAYS_INLINE Packet loadPacket(Index i, Index j) const {
+ return ploadt<Packet, AlignmentType>(&operator()(i, j));
+ }
+
+ EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i, Index j) const {
+ return ploadt<HalfPacket, AlignmentType>(&operator()(i, j));
+ }
+
+ template<typename SubPacket>
+ EIGEN_ALWAYS_INLINE void scatterPacket(Index i, Index j, SubPacket p) const {
+ pscatter<Scalar, SubPacket>(&operator()(i, j), p, m_stride);
+ }
+
+ template<typename SubPacket>
+ EIGEN_ALWAYS_INLINE SubPacket gatherPacket(Index i, Index j) const {
+ return pgather<Scalar, SubPacket>(&operator()(i, j), m_stride);
+ }
+
+ const Index stride() const { return m_stride; }
+
+ Index firstAligned(Index size) const {
+ if (size_t(m_data)%sizeof(Scalar)) {
+ return -1;
+ }
+ return internal::first_aligned(m_data, size);
+ }
+
+ protected:
+ Scalar* EIGEN_RESTRICT m_data;
+ const Index m_stride;
+};
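+// Illustrative sketch: wrapping a raw 4x4 column-major block.
+// \code
+// double block[16];
+// blas_data_mapper<double, int, ColMajor> mapper(block, 4);
+// double x = mapper(2, 3); // reads block[2 + 3*4]
+// \endcode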
+
+// This is just a convenient way to work with
+// blas_data_mapper<const Scalar, Index, StorageOrder>
+template<typename Scalar, typename Index, int StorageOrder>
+class const_blas_data_mapper : public blas_data_mapper<const Scalar, Index, StorageOrder> {
+ public:
+ EIGEN_ALWAYS_INLINE const_blas_data_mapper(const Scalar *data, Index stride) : blas_data_mapper<const Scalar, Index, StorageOrder>(data, stride) {}
+
+ EIGEN_ALWAYS_INLINE const_blas_data_mapper<Scalar, Index, StorageOrder> getSubMapper(Index i, Index j) const {
+ return const_blas_data_mapper<Scalar, Index, StorageOrder>(&(this->operator()(i, j)), this->m_stride);
+ }
+};
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIXMAPPER_H
diff --git a/third_party/eigen3/Eigen/src/Core/util/Memory.h b/third_party/eigen3/Eigen/src/Core/util/Memory.h
new file mode 100644
index 0000000000..03a699177a
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/Core/util/Memory.h
@@ -0,0 +1,984 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Kenneth Riddile <kfriddile@yahoo.com>
+// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
+// Copyright (C) 2010 Thomas Capricelli <orzel@freehackers.org>
+// Copyright (C) 2013 Pavel Holoborodko <pavel@holoborodko.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+/*****************************************************************************
+*** Platform checks for aligned malloc functions ***
+*****************************************************************************/
+
+#ifndef EIGEN_MEMORY_H
+#define EIGEN_MEMORY_H
+
+// See bug 554 (http://eigen.tuxfamily.org/bz/show_bug.cgi?id=554)
+// It seems to be unsafe to check _POSIX_ADVISORY_INFO without including unistd.h first.
+// Currently, let's include it only on unix systems:
+#if defined(__unix__) || defined(__unix)
+ #include <unistd.h>
+ #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
+ #define EIGEN_HAS_POSIX_MEMALIGN 1
+ #endif
+#endif
+
+#ifndef EIGEN_HAS_POSIX_MEMALIGN
+ #define EIGEN_HAS_POSIX_MEMALIGN 0
+#endif
+
+#if defined EIGEN_VECTORIZE_SSE || defined EIGEN_VECTORIZE_AVX
+ #define EIGEN_HAS_MM_MALLOC 1
+#else
+ #define EIGEN_HAS_MM_MALLOC 0
+#endif
+
+namespace Eigen {
+
+namespace internal {
+
+EIGEN_DEVICE_FUNC inline void throw_std_bad_alloc()
+{
+#ifndef __CUDA_ARCH__
+ #ifdef EIGEN_EXCEPTIONS
+ throw std::bad_alloc();
+ #else
+ std::size_t huge = static_cast<std::size_t>(-1);
+ new int[huge];
+ #endif
+#endif
+}
+
+/*****************************************************************************
+*** Implementation of handmade aligned functions ***
+*****************************************************************************/
+
+/* ----- Hand made implementations of aligned malloc/free and realloc ----- */
+
+/** \internal Like malloc, but the returned pointer is guaranteed to be aligned to EIGEN_ALIGN_BYTES.
+  * Fast, but wastes EIGEN_ALIGN_BYTES additional bytes of memory. Does not throw any exception.
+ */
+inline void* handmade_aligned_malloc(std::size_t size)
+{
+ void *original = std::malloc(size+EIGEN_ALIGN_BYTES);
+ if (original == 0) return 0;
+ void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_ALIGN_BYTES-1))) + EIGEN_ALIGN_BYTES);
+ *(reinterpret_cast<void**>(aligned) - 1) = original;
+ return aligned;
+}
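+// Worked example, with EIGEN_ALIGN_BYTES == 16: if std::malloc returns 0x1003,
+// the rounded-up aligned pointer is 0x1010, and the original 0x1003 is stashed
+// in the pointer-sized slot just below it, so that handmade_aligned_free and
+// handmade_aligned_realloc can recover it.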
+
+/** \internal Frees memory allocated with handmade_aligned_malloc */
+inline void handmade_aligned_free(void *ptr)
+{
+ if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
+}
+
+/** \internal
+ * \brief Reallocates aligned memory.
+  * Since we know that our handmade version is based on std::malloc
+  * we can use std::realloc to implement efficient reallocation.
+ */
+inline void* handmade_aligned_realloc(void* ptr, std::size_t size, std::size_t = 0)
+{
+ if (ptr == 0) return handmade_aligned_malloc(size);
+ void *original = *(reinterpret_cast<void**>(ptr) - 1);
+ std::ptrdiff_t previous_offset = static_cast<char *>(ptr)-static_cast<char *>(original);
+ original = std::realloc(original,size+EIGEN_ALIGN_BYTES);
+ if (original == 0) return 0;
+ void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_ALIGN_BYTES-1))) + EIGEN_ALIGN_BYTES);
+ void *previous_aligned = static_cast<char *>(original)+previous_offset;
+ if(aligned!=previous_aligned)
+ std::memmove(aligned, previous_aligned, size);
+
+ *(reinterpret_cast<void**>(aligned) - 1) = original;
+ return aligned;
+}
+
+/*****************************************************************************
+*** Implementation of generic aligned realloc (when no realloc can be used)***
+*****************************************************************************/
+
+EIGEN_DEVICE_FUNC void* aligned_malloc(std::size_t size);
+EIGEN_DEVICE_FUNC void aligned_free(void *ptr);
+
+/** \internal
+ * \brief Reallocates aligned memory.
+ * Allows reallocation with aligned ptr types. This implementation will
+ * always create a new memory chunk and copy the old data.
+ */
+inline void* generic_aligned_realloc(void* ptr, size_t size, size_t old_size)
+{
+ if (ptr==0)
+ return aligned_malloc(size);
+
+ if (size==0)
+ {
+ aligned_free(ptr);
+ return 0;
+ }
+
+ void* newptr = aligned_malloc(size);
+ if (newptr == 0)
+ {
+ #ifdef EIGEN_HAS_ERRNO
+ errno = ENOMEM; // according to the standard
+ #endif
+ return 0;
+ }
+
+ if (ptr != 0)
+ {
+ std::memcpy(newptr, ptr, (std::min)(size,old_size));
+ aligned_free(ptr);
+ }
+
+ return newptr;
+}
+
+/*****************************************************************************
+*** Implementation of portable aligned versions of malloc/free/realloc ***
+*****************************************************************************/
+
+#ifdef EIGEN_NO_MALLOC
+EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()
+{
+ eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
+}
+#elif defined EIGEN_RUNTIME_NO_MALLOC
+EIGEN_DEVICE_FUNC inline bool is_malloc_allowed_impl(bool update, bool new_value = false)
+{
+ static bool value = true;
+  if (update)
+ value = new_value;
+ return value;
+}
+EIGEN_DEVICE_FUNC inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); }
+EIGEN_DEVICE_FUNC inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); }
+EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()
+{
+ eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)");
+}
+#else
+EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()
+{}
+#endif
+
+/** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16- or 32-byte alignment depending on the requirements.
+  * On allocation error, a std::bad_alloc is thrown.
+ */
+EIGEN_DEVICE_FUNC
+inline void* aligned_malloc(size_t size)
+{
+ check_that_malloc_is_allowed();
+
+ void *result;
+ #if !EIGEN_ALIGN
+ result = std::malloc(size);
+ #elif EIGEN_HAS_POSIX_MEMALIGN
+ if(posix_memalign(&result, EIGEN_ALIGN_BYTES, size)) result = 0;
+ #elif EIGEN_HAS_MM_MALLOC
+ result = _mm_malloc(size, EIGEN_ALIGN_BYTES);
+ #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
+ result = _aligned_malloc(size, EIGEN_ALIGN_BYTES);
+ #else
+ result = handmade_aligned_malloc(size);
+ #endif
+
+ if(!result && size)
+ throw_std_bad_alloc();
+
+ return result;
+}
+
+/** \internal Frees memory allocated with aligned_malloc. */
+EIGEN_DEVICE_FUNC
+inline void aligned_free(void *ptr)
+{
+ #if !EIGEN_ALIGN
+ std::free(ptr);
+ #elif EIGEN_HAS_POSIX_MEMALIGN
+ std::free(ptr);
+ #elif EIGEN_HAS_MM_MALLOC
+ _mm_free(ptr);
+ #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
+ _aligned_free(ptr);
+ #else
+ handmade_aligned_free(ptr);
+ #endif
+}
+
+/**
+* \internal
+* \brief Reallocates an aligned block of memory.
+* \throws std::bad_alloc on allocation failure
+**/
+inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size)
+{
+ EIGEN_UNUSED_VARIABLE(old_size);
+
+ void *result;
+#if !EIGEN_ALIGN
+ result = std::realloc(ptr,new_size);
+#elif EIGEN_HAS_POSIX_MEMALIGN
+ result = generic_aligned_realloc(ptr,new_size,old_size);
+#elif EIGEN_HAS_MM_MALLOC
+ // The defined(_mm_free) is just here to verify that this MSVC version
+ // implements _mm_malloc/_mm_free based on the corresponding _aligned_
+ // functions. This may not always be the case and we just try to be safe.
+ #if EIGEN_OS_WIN_STRICT && defined(_mm_free)
+ result = _aligned_realloc(ptr,new_size,EIGEN_ALIGN_BYTES);
+ #else
+ result = generic_aligned_realloc(ptr,new_size,old_size);
+ #endif
+#elif EIGEN_OS_WIN_STRICT
+ result = _aligned_realloc(ptr,new_size,EIGEN_ALIGN_BYTES);
+#else
+ result = handmade_aligned_realloc(ptr,new_size,old_size);
+#endif
+
+ if (!result && new_size)
+ throw_std_bad_alloc();
+
+ return result;
+}
+
+/*****************************************************************************
+*** Implementation of conditionally aligned functions ***
+*****************************************************************************/
+
+/** \internal Allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned.
+  * On allocation error, a std::bad_alloc is thrown.
+ */
+template<bool Align> EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc(size_t size)
+{
+ return aligned_malloc(size);
+}
+
+template<> EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc<false>(size_t size)
+{
+ check_that_malloc_is_allowed();
+
+ void *result = std::malloc(size);
+ if(!result && size)
+ throw_std_bad_alloc();
+ return result;
+}
+
+/** \internal Frees memory allocated with conditional_aligned_malloc */
+template<bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_free(void *ptr)
+{
+ aligned_free(ptr);
+}
+
+template<> EIGEN_DEVICE_FUNC inline void conditional_aligned_free<false>(void *ptr)
+{
+ std::free(ptr);
+}
+
+template<bool Align> inline void* conditional_aligned_realloc(void* ptr, size_t new_size, size_t old_size)
+{
+ return aligned_realloc(ptr, new_size, old_size);
+}
+
+template<> inline void* conditional_aligned_realloc<false>(void* ptr, size_t new_size, size_t)
+{
+ return std::realloc(ptr, new_size);
+}
+
+/*****************************************************************************
+*** Construction/destruction of array elements ***
+*****************************************************************************/
+
+/** \internal Constructs the elements of an array.
+  * The \a size parameter tells for how many objects the constructor of T must be called.
+ */
+template<typename T> EIGEN_DEVICE_FUNC inline T* construct_elements_of_array(T *ptr, size_t size)
+{
+ for (size_t i=0; i < size; ++i) ::new (ptr + i) T;
+ return ptr;
+}
+
+/** \internal Destructs the elements of an array.
+  * The \a size parameter tells for how many objects the destructor of T must be called.
+ */
+template<typename T> EIGEN_DEVICE_FUNC inline void destruct_elements_of_array(T *ptr, size_t size)
+{
+ // always destruct an array starting from the end.
+ if(ptr)
+ while(size) ptr[--size].~T();
+}
+
+/*****************************************************************************
+*** Implementation of aligned new/delete-like functions ***
+*****************************************************************************/
+
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void check_size_for_overflow(size_t size)
+{
+ if(size > size_t(-1) / sizeof(T))
+ throw_std_bad_alloc();
+}
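+// e.g. with sizeof(T) == 8 on a 64-bit platform, any size above roughly 2^61
+// would make sizeof(T)*size wrap around, so such requests are rejected up front.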
+
+/** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to have 16-byte alignment.
+  * On allocation error, a std::bad_alloc is thrown.
+  * The default constructor of T is called.
+ */
+template<typename T> EIGEN_DEVICE_FUNC inline T* aligned_new(size_t size)
+{
+ check_size_for_overflow<T>(size);
+ T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
+ return construct_elements_of_array(result, size);
+}
+
+template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_new(size_t size)
+{
+ check_size_for_overflow<T>(size);
+ T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
+ return construct_elements_of_array(result, size);
+}
+
+template<typename T> EIGEN_DEVICE_FUNC inline T* allocate_uvm(size_t size)
+{
+#if defined(EIGEN_USE_GPU) && defined(__CUDA_ARCH__)
+ return (T*)malloc(size);
+#elif defined(EIGEN_USE_GPU) && defined(__NVCC__)
+ T* result = NULL;
+ if (cudaMallocManaged(&result, size) != cudaSuccess) {
+ throw_std_bad_alloc();
+ }
+ return result;
+#else
+ return reinterpret_cast<T*>(conditional_aligned_malloc<true>(sizeof(T)*size));
+#endif
+}
+
+template<typename T> EIGEN_DEVICE_FUNC void deallocate_uvm(T* ptr)
+{
+#if defined(EIGEN_USE_GPU) && defined(__CUDA_ARCH__)
+ free(ptr);
+#elif defined(EIGEN_USE_GPU) && defined(__NVCC__)
+ if (cudaFree(ptr) != cudaSuccess) {
+ throw_std_bad_alloc();
+ }
+#else
+ return conditional_aligned_free<true>(ptr);
+#endif
+}
+
+/** \internal Deletes objects constructed with aligned_new
+  * The \a size parameter tells for how many objects the destructor of T must be called.
+ */
+template<typename T> EIGEN_DEVICE_FUNC inline void aligned_delete(T *ptr, size_t size)
+{
+ destruct_elements_of_array<T>(ptr, size);
+ aligned_free(ptr);
+}
+
+/** \internal Deletes objects constructed with conditional_aligned_new
+  * The \a size parameter tells for how many objects the destructor of T must be called.
+ */
+template<typename T, bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_delete(T *ptr, size_t size)
+{
+ destruct_elements_of_array<T>(ptr, size);
+ conditional_aligned_free<Align>(ptr);
+}
+
+template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_realloc_new(T* pts, size_t new_size, size_t old_size)
+{
+ check_size_for_overflow<T>(new_size);
+ check_size_for_overflow<T>(old_size);
+ if(new_size < old_size)
+ destruct_elements_of_array(pts+new_size, old_size-new_size);
+ T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
+ if(new_size > old_size)
+ construct_elements_of_array(result+old_size, new_size-old_size);
+ return result;
+}
+
+
+template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_new_auto(size_t size)
+{
+ check_size_for_overflow<T>(size);
+ T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
+ if(NumTraits<T>::RequireInitialization)
+ construct_elements_of_array(result, size);
+ return result;
+}
+
+template<typename T, bool Align, bool UseUVM> EIGEN_DEVICE_FUNC inline T* conditional_managed_new_auto(size_t size)
+{
+ check_size_for_overflow<T>(size);
+ T *result;
+ if (UseUVM) {
+ result = allocate_uvm<T>(size*sizeof(T));
+ }
+ else {
+ result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
+ }
+ if(NumTraits<T>::RequireInitialization)
+ construct_elements_of_array(result, size);
+ return result;
+}
+
+template<typename T, bool Align, bool UseUVM> EIGEN_DEVICE_FUNC inline void conditional_managed_delete_auto(T* ptr, size_t size)
+{
+ if(NumTraits<T>::RequireInitialization)
+ destruct_elements_of_array<T>(ptr, size);
+ if (UseUVM) {
+ deallocate_uvm(ptr);
+ }
+ else {
+ conditional_aligned_free<Align>(ptr);
+ }
+}
+
+template<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(T* pts, size_t new_size, size_t old_size)
+{
+ check_size_for_overflow<T>(new_size);
+ check_size_for_overflow<T>(old_size);
+ if(NumTraits<T>::RequireInitialization && (new_size < old_size))
+ destruct_elements_of_array(pts+new_size, old_size-new_size);
+ T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
+ if(NumTraits<T>::RequireInitialization && (new_size > old_size))
+ construct_elements_of_array(result+old_size, new_size-old_size);
+ return result;
+}
+
+template<typename T, bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_delete_auto(T *ptr, size_t size)
+{
+ if(NumTraits<T>::RequireInitialization)
+ destruct_elements_of_array<T>(ptr, size);
+ conditional_aligned_free<Align>(ptr);
+}
+
+/****************************************************************************/
+
+/** \internal Returns the index of the first element of the array that is well aligned for vectorization.
+ *
+ * \param array the address of the start of the array
+ * \param size the size of the array
+ *
+ * \note If no element of the array is well aligned, the size of the array is returned. Typically,
+ * for example with SSE, "well aligned" means 16-byte-aligned. If vectorization is disabled or if the
+ * packet size for the given scalar type is 1, then everything is considered well-aligned.
+ *
+ * \note If the scalar type is vectorizable, we rely on the following assumptions: sizeof(Scalar) is a
+ * power of 2, the packet size in bytes is also a power of 2, and is a multiple of sizeof(Scalar). On the
+ * other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for
+ * example with Scalar=double on certain 32-bit platforms, see bug #79.
+ *
+ * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.
+ */
+template<typename Scalar, typename Index>
+inline Index first_aligned(const Scalar* array, Index size)
+{
+ enum { PacketSize = packet_traits<Scalar>::size,
+ PacketAlignedMask = PacketSize-1
+ };
+
+ if(PacketSize==1)
+ {
+ // Either there is no vectorization, or a packet consists of exactly 1 scalar so that all elements
+ // of the array have the same alignment.
+ return 0;
+ }
+ else if(size_t(array) & (sizeof(Scalar)-1))
+ {
+ // There is vectorization for this scalar type, but the array is not aligned to the size of a single scalar.
+ // Consequently, no element of the array is well aligned.
+ return size;
+ }
+ else
+ {
+ return std::min<Index>( (PacketSize - (Index((size_t(array)/sizeof(Scalar))) & PacketAlignedMask))
+ & PacketAlignedMask, size);
+ }
+}
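+// Worked example: for Scalar=float with PacketSize==4 (SSE) and array==0x1004,
+// size_t(array)/sizeof(Scalar) == 0x401, so the result is
+// (4 - (0x401 & 3)) & 3 == 3: element 3 sits at 0x1010, the first 16-byte
+// aligned address.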
+
+/** \internal Returns the smallest integer multiple of \a base and greater or equal to \a size
+ */
+template<typename Index>
+inline Index first_multiple(Index size, Index base)
+{
+ return ((size+base-1)/base)*base;
+}
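+// e.g. first_multiple(13, 4) == 16, and first_multiple(16, 4) == 16.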
+
+// std::copy is much slower than memcpy, so let's introduce a smart_copy which
+// uses memcpy on trivial types, i.e., on types that do not require an initialization ctor.
+template<typename T, bool UseMemcpy> struct smart_copy_helper;
+
+template<typename T> EIGEN_DEVICE_FUNC void smart_copy(const T* start, const T* end, T* target)
+{
+ smart_copy_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
+}
+
+template<typename T> struct smart_copy_helper<T,true> {
+ static inline EIGEN_DEVICE_FUNC void run(const T* start, const T* end, T* target)
+ { memcpy(target, start, std::ptrdiff_t(end)-std::ptrdiff_t(start)); }
+};
+
+template<typename T> struct smart_copy_helper<T,false> {
+ static inline EIGEN_DEVICE_FUNC void run(const T* start, const T* end, T* target)
+ { std::copy(start, end, target); }
+};
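+// So smart_copy(a, a+n, b) boils down to a single memcpy for float or double,
+// while a type with a nontrivial constructor (RequireInitialization) goes
+// through std::copy and its assignment operator instead.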
+
+// Intelligent memmove: falls back to std::memmove for POD types, uses std::copy/std::copy_backward otherwise.
+template<typename T, bool UseMemmove> struct smart_memmove_helper;
+
+template<typename T> void smart_memmove(const T* start, const T* end, T* target)
+{
+ smart_memmove_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
+}
+
+template<typename T> struct smart_memmove_helper<T,true> {
+ static inline void run(const T* start, const T* end, T* target)
+ { std::memmove(target, start, std::ptrdiff_t(end)-std::ptrdiff_t(start)); }
+};
+
+template<typename T> struct smart_memmove_helper<T,false> {
+ static inline void run(const T* start, const T* end, T* target)
+ {
+ if (uintptr_t(target) < uintptr_t(start))
+ {
+ std::copy(start, end, target);
+ }
+ else
+ {
+ std::ptrdiff_t count = (std::ptrdiff_t(end)-std::ptrdiff_t(start)) / sizeof(T);
+ std::copy_backward(start, end, target + count);
+ }
+ }
+};
+
+
+/*****************************************************************************
+*** Implementation of runtime stack allocation (falling back to malloc) ***
+*****************************************************************************/
+
+// you can override Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
+// to the appropriate stack allocation function
+#ifndef EIGEN_ALLOCA
+ #if (defined __linux__) || (defined __APPLE__)
+ #define EIGEN_ALLOCA alloca
+ #elif defined(_MSC_VER)
+ #define EIGEN_ALLOCA _alloca
+ #endif
+#endif
+
+// This helper class constructs the elements in the allocated memory, and takes care of destructing and freeing the handled data
+// at destruction time. In practice this helper class is mainly useful to avoid memory leaks in case of exceptions.
+template<typename T> class aligned_stack_memory_handler
+{
+ public:
+ /* Creates a stack_memory_handler responsible for the buffer \a ptr of size \a size.
+ * Note that \a ptr can be 0 regardless of the other parameters.
+ * This constructor takes care of constructing/initializing the elements of the buffer if required by the scalar type T (see NumTraits<T>::RequireInitialization).
+     * In this case, the buffer elements will also be destructed when this handler is destructed.
+ * Finally, if \a dealloc is true, then the pointer \a ptr is freed.
+ **/
+ aligned_stack_memory_handler(T* ptr, size_t size, bool dealloc)
+ : m_ptr(ptr), m_size(size), m_deallocate(dealloc)
+ {
+ if(NumTraits<T>::RequireInitialization && m_ptr)
+ Eigen::internal::construct_elements_of_array(m_ptr, size);
+ }
+ ~aligned_stack_memory_handler()
+ {
+ if(NumTraits<T>::RequireInitialization && m_ptr)
+ Eigen::internal::destruct_elements_of_array<T>(m_ptr, m_size);
+ if(m_deallocate)
+ Eigen::internal::aligned_free(m_ptr);
+ }
+ protected:
+ T* m_ptr;
+ size_t m_size;
+ bool m_deallocate;
+};
+
+} // end namespace internal
+
+/** \internal
+  * Declares, allocates and constructs an aligned buffer named NAME of SIZE elements of type TYPE on the stack
+ * if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform
+ * (currently, this is Linux and Visual Studio only). Otherwise the memory is allocated on the heap.
+ * The allocated buffer is automatically deleted when exiting the scope of this declaration.
+  * If BUFFER is non-null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs.
+ * Here is an example:
+ * \code
+ * {
+ * ei_declare_aligned_stack_constructed_variable(float,data,size,0);
+ * // use data[0] to data[size-1]
+ * }
+ * \endcode
+  * The underlying stack allocation function can be controlled with the EIGEN_ALLOCA preprocessor token.
+ */
+#ifdef EIGEN_ALLOCA
+  // The native alloca() that comes with llvm aligns the buffer on 16 bytes even when AVX is enabled.
+  #if defined(__arm__) || defined(_WIN32) || EIGEN_ALIGN_BYTES > 16
+ #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((reinterpret_cast<size_t>(EIGEN_ALLOCA(SIZE+EIGEN_ALIGN_BYTES)) & ~(size_t(EIGEN_ALIGN_BYTES-1))) + EIGEN_ALIGN_BYTES)
+ #else
+ #define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA
+ #endif
+
+ #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
+ Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
+ TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \
+ : reinterpret_cast<TYPE*>( \
+ (sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \
+ : Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) ); \
+ Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT)
+
+#else
+
+ #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
+ Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
+ TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE)); \
+ Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)
+
+#endif
+
+
+/*****************************************************************************
+*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF] ***
+*****************************************************************************/
+
+#if EIGEN_ALIGN
+ #ifdef EIGEN_EXCEPTIONS
+ #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+ void* operator new(size_t size, const std::nothrow_t&) throw() { \
+ try { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
+ catch (...) { return 0; } \
+ return 0; \
+ }
+ #else
+ #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+ void* operator new(size_t size, const std::nothrow_t&) throw() { \
+ return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
+ }
+ #endif
+
+ #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
+ void *operator new(size_t size) { \
+ return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
+ } \
+ void *operator new[](size_t size) { \
+ return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
+ } \
+ void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+ void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+ /* in-place new and delete. since (at least afaik) there is no actual */ \
+ /* memory allocated we can safely let the default implementation handle */ \
+ /* this particular case. */ \
+ static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \
+ static void *operator new[](size_t size, void* ptr) { return ::operator new[](size,ptr); } \
+ void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \
+ void operator delete[](void * memory, void *ptr) throw() { return ::operator delete[](memory,ptr); } \
+ /* nothrow-new (returns zero instead of std::bad_alloc) */ \
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+ void operator delete(void *ptr, const std::nothrow_t&) throw() { \
+ Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); \
+ } \
+ typedef void eigen_aligned_operator_new_marker_type;
+#else
+ #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+#endif
+
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%EIGEN_ALIGN_BYTES==0)))
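+// Typical use in a client type holding a fixed-size vectorizable member
+// (illustrative sketch following the usual Eigen guidance; Camera is a made-up name):
+// \code
+// struct Camera {
+//   Eigen::Matrix4f pose;
+//   EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+// };
+// Camera* c = new Camera; // heap storage now honors the alignment requirement
+// \endcode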
+
+/****************************************************************************/
+
+/** \class aligned_allocator
+* \ingroup Core_Module
+*
+* \brief STL compatible allocator to use with 16-byte-aligned types
+*
+* Example:
+* \code
+* // Matrix4f requires 16-byte alignment:
+* std::map< int, Matrix4f, std::less<int>,
+* aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4;
+* // Vector3f does not require 16-byte alignment, no need to use Eigen's allocator:
+* std::map< int, Vector3f > my_map_vec3;
+* \endcode
+*
+* \sa \ref TopicStlContainers.
+*/
+template<class T>
+class aligned_allocator : public std::allocator<T>
+{
+public:
+ typedef size_t size_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T value_type;
+
+ template<class U>
+ struct rebind
+ {
+ typedef aligned_allocator<U> other;
+ };
+
+ aligned_allocator() : std::allocator<T>() {}
+
+ aligned_allocator(const aligned_allocator& other) : std::allocator<T>(other) {}
+
+ template<class U>
+ aligned_allocator(const aligned_allocator<U>& other) : std::allocator<T>(other) {}
+
+ ~aligned_allocator() {}
+
+ pointer allocate(size_type num, const void* /*hint*/ = 0)
+ {
+ internal::check_size_for_overflow<T>(num);
+ return static_cast<pointer>( internal::aligned_malloc(num * sizeof(T)) );
+ }
+
+ void deallocate(pointer p, size_type /*num*/)
+ {
+ internal::aligned_free(p);
+ }
+};
+
+//---------- Cache sizes ----------
+
+#if !defined(EIGEN_NO_CPUID)
+# if EIGEN_COMP_GNUC && EIGEN_ARCH_i386_OR_x86_64
+# if defined(__PIC__) && EIGEN_ARCH_i386
+ // Case for x86 with PIC
+# define EIGEN_CPUID(abcd,func,id) \
+ __asm__ __volatile__ ("xchgl %%ebx, %k1;cpuid; xchgl %%ebx,%k1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
+# elif defined(__PIC__) && EIGEN_ARCH_x86_64
+ // Case for x64 with PIC. In theory this is only a problem with recent gcc and with medium or large code model, not with the default small code model.
+ // However, we cannot detect which code model is used, and the xchg overhead is negligible anyway.
+# define EIGEN_CPUID(abcd,func,id) \
+ __asm__ __volatile__ ("xchg{q}\t{%%}rbx, %q1; cpuid; xchg{q}\t{%%}rbx, %q1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id));
+# else
+ // Case for x86_64 or x86 w/o PIC
+# define EIGEN_CPUID(abcd,func,id) \
+ __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) );
+# endif
+# elif EIGEN_COMP_MSVC
+# if (EIGEN_COMP_MSVC > 1500) && EIGEN_ARCH_i386_OR_x86_64
+# define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
+# endif
+# endif
+#endif
+
+namespace internal {
+
+#ifdef EIGEN_CPUID
+
+inline bool cpuid_is_vendor(int abcd[4], const char* vendor)
+{
+ return abcd[1]==(reinterpret_cast<const int*>(vendor))[0] && abcd[3]==(reinterpret_cast<const int*>(vendor))[1] && abcd[2]==(reinterpret_cast<const int*>(vendor))[2];
+}
+
+inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)
+{
+ int abcd[4];
+ l1 = l2 = l3 = 0;
+ int cache_id = 0;
+ int cache_type = 0;
+ do {
+ abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+ EIGEN_CPUID(abcd,0x4,cache_id);
+ cache_type = (abcd[0] & 0x0F) >> 0;
+ if(cache_type==1||cache_type==3) // data or unified cache
+ {
+ int cache_level = (abcd[0] & 0xE0) >> 5; // A[7:5]
+ int ways = (abcd[1] & 0xFFC00000) >> 22; // B[31:22]
+ int partitions = (abcd[1] & 0x003FF000) >> 12; // B[21:12]
+ int line_size = (abcd[1] & 0x00000FFF) >> 0; // B[11:0]
+ int sets = (abcd[2]); // C[31:0]
+
+ int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1);
+
+ switch(cache_level)
+ {
+ case 1: l1 = cache_size; break;
+ case 2: l2 = cache_size; break;
+ case 3: l3 = cache_size; break;
+ default: break;
+ }
+ }
+ cache_id++;
+ } while(cache_type>0 && cache_id<16);
+}
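+// Worked example: an 8-way data L1 with 64-byte lines, 64 sets and no extra
+// partitions reports ways=7, partitions=0, line_size=63, sets=63, giving
+// (7+1)*(0+1)*(63+1)*(63+1) = 32768 bytes = 32 KB.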
+
+inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3)
+{
+ int abcd[4];
+ abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+ l1 = l2 = l3 = 0;
+ EIGEN_CPUID(abcd,0x00000002,0);
+ unsigned char * bytes = reinterpret_cast<unsigned char *>(abcd)+2;
+ bool check_for_p2_core2 = false;
+ for(int i=0; i<14; ++i)
+ {
+ switch(bytes[i])
+ {
+ case 0x0A: l1 = 8; break; // 0Ah data L1 cache, 8 KB, 2 ways, 32 byte lines
+ case 0x0C: l1 = 16; break; // 0Ch data L1 cache, 16 KB, 4 ways, 32 byte lines
+ case 0x0E: l1 = 24; break; // 0Eh data L1 cache, 24 KB, 6 ways, 64 byte lines
+ case 0x10: l1 = 16; break; // 10h data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
+ case 0x15: l1 = 16; break; // 15h code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
+ case 0x2C: l1 = 32; break; // 2Ch data L1 cache, 32 KB, 8 ways, 64 byte lines
+ case 0x30: l1 = 32; break; // 30h code L1 cache, 32 KB, 8 ways, 64 byte lines
+ case 0x60: l1 = 16; break; // 60h data L1 cache, 16 KB, 8 ways, 64 byte lines, sectored
+ case 0x66: l1 = 8; break; // 66h data L1 cache, 8 KB, 4 ways, 64 byte lines, sectored
+ case 0x67: l1 = 16; break; // 67h data L1 cache, 16 KB, 4 ways, 64 byte lines, sectored
+ case 0x68: l1 = 32; break; // 68h data L1 cache, 32 KB, 4 ways, 64 byte lines, sectored
+ case 0x1A: l2 = 96; break; // code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64)
+ case 0x22: l3 = 512; break; // code and data L3 cache, 512 KB, 4 ways (!), 64 byte lines, dual-sectored
+ case 0x23: l3 = 1024; break; // code and data L3 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x25: l3 = 2048; break; // code and data L3 cache, 2048 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x29: l3 = 4096; break; // code and data L3 cache, 4096 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x39: l2 = 128; break; // code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored
+ case 0x3A: l2 = 192; break; // code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored
+ case 0x3B: l2 = 128; break; // code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored
+ case 0x3C: l2 = 256; break; // code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored
+ case 0x3D: l2 = 384; break; // code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored
+ case 0x3E: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored
+ case 0x40: l2 = 0; break; // no integrated L2 cache (P6 core) or L3 cache (P4 core)
+ case 0x41: l2 = 128; break; // code and data L2 cache, 128 KB, 4 ways, 32 byte lines
+ case 0x42: l2 = 256; break; // code and data L2 cache, 256 KB, 4 ways, 32 byte lines
+ case 0x43: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 32 byte lines
+ case 0x44: l2 = 1024; break; // code and data L2 cache, 1024 KB, 4 ways, 32 byte lines
+ case 0x45: l2 = 2048; break; // code and data L2 cache, 2048 KB, 4 ways, 32 byte lines
+ case 0x46: l3 = 4096; break; // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines
+ case 0x47: l3 = 8192; break; // code and data L3 cache, 8192 KB, 8 ways, 64 byte lines
+ case 0x48: l2 = 3072; break; // code and data L2 cache, 3072 KB, 12 ways, 64 byte lines
+ case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break;// code and data L3 cache, 4096 KB, 16 ways, 64 byte lines (P4) or L2 for core2
+ case 0x4A: l3 = 6144; break; // code and data L3 cache, 6144 KB, 12 ways, 64 byte lines
+ case 0x4B: l3 = 8192; break; // code and data L3 cache, 8192 KB, 16 ways, 64 byte lines
+ case 0x4C: l3 = 12288; break; // code and data L3 cache, 12288 KB, 12 ways, 64 byte lines
+ case 0x4D: l3 = 16384; break; // code and data L3 cache, 16384 KB, 16 ways, 64 byte lines
+ case 0x4E: l2 = 6144; break; // code and data L2 cache, 6144 KB, 24 ways, 64 byte lines
+ case 0x78: l2 = 1024; break; // code and data L2 cache, 1024 KB, 4 ways, 64 byte lines
+ case 0x79: l2 = 128; break; // code and data L2 cache, 128 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x7A: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x7B: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x7C: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x7D: l2 = 2048; break; // code and data L2 cache, 2048 KB, 8 ways, 64 byte lines
+ case 0x7E: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. (IA-64)
+ case 0x7F: l2 = 512; break; // code and data L2 cache, 512 KB, 2 ways, 64 byte lines
+ case 0x80: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 64 byte lines
+ case 0x81: l2 = 128; break; // code and data L2 cache, 128 KB, 8 ways, 32 byte lines
+ case 0x82: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 32 byte lines
+ case 0x83: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 32 byte lines
+ case 0x84: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 32 byte lines
+ case 0x85: l2 = 2048; break; // code and data L2 cache, 2048 KB, 8 ways, 32 byte lines
+ case 0x86: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 64 byte lines
+ case 0x87: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines
+ case 0x88: l3 = 2048; break; // code and data L3 cache, 2048 KB, 4 ways, 64 byte lines (IA-64)
+ case 0x89: l3 = 4096; break; // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines (IA-64)
+ case 0x8A: l3 = 8192; break; // code and data L3 cache, 8192 KB, 4 ways, 64 byte lines (IA-64)
+ case 0x8D: l3 = 3072; break; // code and data L3 cache, 3072 KB, 12 ways, 128 byte lines (IA-64)
+
+ default: break;
+ }
+ }
+ if(check_for_p2_core2 && l2 == l3)
+ l3 = 0;
+ l1 *= 1024;
+ l2 *= 1024;
+ l3 *= 1024;
+}
+
+inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs)
+{
+ if(max_std_funcs>=4)
+ queryCacheSizes_intel_direct(l1,l2,l3);
+ else
+ queryCacheSizes_intel_codes(l1,l2,l3);
+}
+
+inline void queryCacheSizes_amd(int& l1, int& l2, int& l3)
+{
+ int abcd[4];
+ abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+ EIGEN_CPUID(abcd,0x80000005,0);
+ l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB
+ abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+ EIGEN_CPUID(abcd,0x80000006,0);
+  l2 = (abcd[2] >> 16) * 1024; // C[31:16] = l2 cache size in KB
+  l3 = ((abcd[3] & 0xFFFC0000) >> 18) * 512 * 1024; // D[31:18] = l3 cache size in units of 512 KB
+}
+#endif
+
+/** \internal
+ * Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively */
+inline void queryCacheSizes(int& l1, int& l2, int& l3)
+{
+ #ifdef EIGEN_CPUID
+ int abcd[4];
+
+ // identify the CPU vendor
+ EIGEN_CPUID(abcd,0x0,0);
+ int max_std_funcs = abcd[1];
+ if(cpuid_is_vendor(abcd,"GenuineIntel"))
+ queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
+ else if(cpuid_is_vendor(abcd,"AuthenticAMD") || cpuid_is_vendor(abcd,"AMDisbetter!"))
+ queryCacheSizes_amd(l1,l2,l3);
+ else
+ // by default let's use Intel's API
+ queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
+
+ // here is the list of other vendors:
+// ||cpuid_is_vendor(abcd,"VIA VIA VIA ")
+// ||cpuid_is_vendor(abcd,"CyrixInstead")
+// ||cpuid_is_vendor(abcd,"CentaurHauls")
+// ||cpuid_is_vendor(abcd,"GenuineTMx86")
+// ||cpuid_is_vendor(abcd,"TransmetaCPU")
+// ||cpuid_is_vendor(abcd,"RiseRiseRise")
+// ||cpuid_is_vendor(abcd,"Geode by NSC")
+// ||cpuid_is_vendor(abcd,"SiS SiS SiS ")
+// ||cpuid_is_vendor(abcd,"UMC UMC UMC ")
+// ||cpuid_is_vendor(abcd,"NexGenDriven")
+ #else
+ l1 = l2 = l3 = -1;
+ #endif
+}
+
+/** \internal
+ * \returns the size in Bytes of the L1 data cache */
+inline int queryL1CacheSize()
+{
+ int l1(-1), l2, l3;
+ queryCacheSizes(l1,l2,l3);
+ return l1;
+}
+
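+/** \internal
+ * \returns the size in Bytes of the L2 data cache */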
+inline int queryL2CacheSize()
+{
+ int l1, l2(-1), l3;
+ queryCacheSizes(l1,l2,l3);
+ return l2;
+}
+
+/** \internal
+ * \returns the size in Bytes of the L3 cache if present, otherwise of the L2 cache */
+inline int queryTopLevelCacheSize()
+{
+ int l1, l2(-1), l3(-1);
+ queryCacheSizes(l1,l2,l3);
+ return (std::max)(l2,l3);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MEMORY_H
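
As a quick sanity check of the cache-query helpers above, here is a minimal
usage sketch (assuming the usual <Eigen/Core> entry point, which pulls in
Memory.h; the internal:: functions are not part of the public API):

    #include <Eigen/Core>
    #include <cstdio>

    int main()
    {
      int l1, l2, l3;
      // Dispatches to the Intel or AMD path via CPUID; on platforms where
      // EIGEN_CPUID is not defined, all three sizes come back as -1.
      Eigen::internal::queryCacheSizes(l1, l2, l3);
      std::printf("L1=%d L2=%d L3=%d bytes\n", l1, l2, l3);
      std::printf("top-level cache: %d bytes\n",
                  Eigen::internal::queryTopLevelCacheSize());
      return 0;
    }
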
diff --git a/third_party/eigen3/Eigen/src/Core/util/Meta.h b/third_party/eigen3/Eigen/src/Core/util/Meta.h
new file mode 100644
index 0000000000..7576b32689
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/Core/util/Meta.h
@@ -0,0 +1,334 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_META_H
+#define EIGEN_META_H
+
+#if defined(__CUDA_ARCH__) && !defined(__GCUDACC__)
+#include <math_constants.h>
+#endif
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal
+ * \file Meta.h
+ * This file contains generic metaprogramming classes which are not specifically related to Eigen.
+ * \note In case you wonder, yes we're aware that Boost already provides all these features;
+ * however, we don't want to add a dependency on Boost.
+ */
+
+struct true_type { enum { value = 1 }; };
+struct false_type { enum { value = 0 }; };
+
+template<bool Condition, typename Then, typename Else>
+struct conditional { typedef Then type; };
+
+template<typename Then, typename Else>
+struct conditional <false, Then, Else> { typedef Else type; };
+
+template<typename T, typename U> struct is_same { enum { value = 0 }; };
+template<typename T> struct is_same<T,T> { enum { value = 1 }; };
+
+template<typename T> struct remove_reference { typedef T type; };
+template<typename T> struct remove_reference<T&> { typedef T type; };
+
+template<typename T> struct remove_pointer { typedef T type; };
+template<typename T> struct remove_pointer<T*> { typedef T type; };
+template<typename T> struct remove_pointer<T*const> { typedef T type; };
+
+template <class T> struct remove_const { typedef T type; };
+template <class T> struct remove_const<const T> { typedef T type; };
+template <class T> struct remove_const<const T[]> { typedef T type[]; };
+template <class T, unsigned int Size> struct remove_const<const T[Size]> { typedef T type[Size]; };
+
+template<typename T> struct remove_all { typedef T type; };
+template<typename T> struct remove_all<const T> { typedef typename remove_all<T>::type type; };
+template<typename T> struct remove_all<T const&> { typedef typename remove_all<T>::type type; };
+template<typename T> struct remove_all<T&> { typedef typename remove_all<T>::type type; };
+template<typename T> struct remove_all<T const*> { typedef typename remove_all<T>::type type; };
+template<typename T> struct remove_all<T*> { typedef typename remove_all<T>::type type; };
+
+template<typename T> struct is_arithmetic { enum { value = false }; };
+template<> struct is_arithmetic<float> { enum { value = true }; };
+template<> struct is_arithmetic<double> { enum { value = true }; };
+template<> struct is_arithmetic<long double> { enum { value = true }; };
+template<> struct is_arithmetic<bool> { enum { value = true }; };
+template<> struct is_arithmetic<char> { enum { value = true }; };
+template<> struct is_arithmetic<signed char> { enum { value = true }; };
+template<> struct is_arithmetic<unsigned char> { enum { value = true }; };
+template<> struct is_arithmetic<signed short> { enum { value = true }; };
+template<> struct is_arithmetic<unsigned short>{ enum { value = true }; };
+template<> struct is_arithmetic<signed int> { enum { value = true }; };
+template<> struct is_arithmetic<unsigned int> { enum { value = true }; };
+template<> struct is_arithmetic<signed long> { enum { value = true }; };
+template<> struct is_arithmetic<unsigned long> { enum { value = true }; };
+
+template <typename T> struct add_const { typedef const T type; };
+template <typename T> struct add_const<T&> { typedef T& type; };
+
+template <typename T> struct is_const { enum { value = 0 }; };
+template <typename T> struct is_const<T const> { enum { value = 1 }; };
+
+template<typename T> struct add_const_on_value_type { typedef const T type; };
+template<typename T> struct add_const_on_value_type<T&> { typedef T const& type; };
+template<typename T> struct add_const_on_value_type<T*> { typedef T const* type; };
+template<typename T> struct add_const_on_value_type<T* const> { typedef T const* const type; };
+template<typename T> struct add_const_on_value_type<T const* const> { typedef T const* const type; };
+
+/** \internal Allows enabling or disabling an overload
+ * depending on a compile-time condition.
+ */
+template<bool Condition, typename T> struct enable_if;
+
+template<typename T> struct enable_if<true,T>
+{ typedef T type; };
+
+#if defined(__CUDA_ARCH__) && !defined(__GCUDACC__)
+
+namespace device {
+
+template<typename T> struct numeric_limits
+{
+  EIGEN_DEVICE_FUNC
+  static T epsilon() { return 0; }
+  EIGEN_DEVICE_FUNC
+  static T max() { assert(false && "Max not supported for this type"); }
+  EIGEN_DEVICE_FUNC
+  static T lowest() { assert(false && "Lowest not supported for this type"); }
+};
+template<> struct numeric_limits<float>
+{
+ EIGEN_DEVICE_FUNC
+ static float epsilon() { return __FLT_EPSILON__; }
+ EIGEN_DEVICE_FUNC
+ static float max() { return CUDART_MAX_NORMAL_F; }
+ EIGEN_DEVICE_FUNC
+ static float lowest() { return -CUDART_MAX_NORMAL_F; }
+};
+template<> struct numeric_limits<double>
+{
+ EIGEN_DEVICE_FUNC
+ static double epsilon() { return __DBL_EPSILON__; }
+ EIGEN_DEVICE_FUNC
+ static double max() { return CUDART_INF; }
+ EIGEN_DEVICE_FUNC
+ static double lowest() { return -CUDART_INF; }
+};
+template<> struct numeric_limits<int>
+{
+ EIGEN_DEVICE_FUNC
+ static int epsilon() { return 0; }
+ EIGEN_DEVICE_FUNC
+ static int max() { return INT_MAX; }
+ EIGEN_DEVICE_FUNC
+ static int lowest() { return INT_MIN; }
+};
+template<> struct numeric_limits<long>
+{
+ EIGEN_DEVICE_FUNC
+ static long epsilon() { return 0; }
+ EIGEN_DEVICE_FUNC
+ static long max() { return LONG_MAX; }
+ EIGEN_DEVICE_FUNC
+ static long lowest() { return LONG_MIN; }
+};
+template<> struct numeric_limits<long long>
+{
+ EIGEN_DEVICE_FUNC
+ static long long epsilon() { return 0; }
+ EIGEN_DEVICE_FUNC
+ static long long max() { return LLONG_MAX; }
+ EIGEN_DEVICE_FUNC
+ static long long lowest() { return LLONG_MIN; }
+};
+
+} // end namespace device
+
+#endif
+
+/** \internal
+ * A base class to disable the default copy ctor and copy assignment operator.
+ */
+class noncopyable
+{
+ noncopyable(const noncopyable&);
+ const noncopyable& operator=(const noncopyable&);
+protected:
+ noncopyable() {}
+ ~noncopyable() {}
+};
+
+
+/** \internal
+ * Convenient struct to get the result type of a unary or binary functor.
+ *
+ * It supports both the current STL mechanism (using the result_type member)
+ * and the upcoming next-generation STL one (using a templated result member).
+ * If neither member is provided, the type of the first argument is returned. FIXME: that fallback is a pretty bad hack.
+ */
+template<typename T> struct result_of {};
+
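+// sizeof-based dispatch: the three probe types below have distinct sizes,
+// so sizeof(testFunctor(...)) reveals which overload was selected.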
+struct has_none {int a[1];};
+struct has_std_result_type {int a[2];};
+struct has_tr1_result {int a[3];};
+
+template<typename Func, typename ArgType, int SizeOf=sizeof(has_none)>
+struct unary_result_of_select {typedef ArgType type;};
+
+template<typename Func, typename ArgType>
+struct unary_result_of_select<Func, ArgType, sizeof(has_std_result_type)> {typedef typename Func::result_type type;};
+
+template<typename Func, typename ArgType>
+struct unary_result_of_select<Func, ArgType, sizeof(has_tr1_result)> {typedef typename Func::template result<Func(ArgType)>::type type;};
+
+template<typename Func, typename ArgType>
+struct result_of<Func(ArgType)> {
+ template<typename T>
+ static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
+ template<typename T>
+ static has_tr1_result testFunctor(T const *, typename T::template result<T(ArgType)>::type const * = 0);
+ static has_none testFunctor(...);
+
+ // note that the following indirection is needed for gcc-3.3
+ enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
+ typedef typename unary_result_of_select<Func, ArgType, FunctorType>::type type;
+};
+
+template<typename Func, typename ArgType0, typename ArgType1, int SizeOf=sizeof(has_none)>
+struct binary_result_of_select {typedef ArgType0 type;};
+
+template<typename Func, typename ArgType0, typename ArgType1>
+struct binary_result_of_select<Func, ArgType0, ArgType1, sizeof(has_std_result_type)>
+{typedef typename Func::result_type type;};
+
+template<typename Func, typename ArgType0, typename ArgType1>
+struct binary_result_of_select<Func, ArgType0, ArgType1, sizeof(has_tr1_result)>
+{typedef typename Func::template result<Func(ArgType0,ArgType1)>::type type;};
+
+template<typename Func, typename ArgType0, typename ArgType1>
+struct result_of<Func(ArgType0,ArgType1)> {
+ template<typename T>
+ static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
+ template<typename T>
+ static has_tr1_result testFunctor(T const *, typename T::template result<T(ArgType0,ArgType1)>::type const * = 0);
+ static has_none testFunctor(...);
+
+ // note that the following indirection is needed for gcc-3.3
+ enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
+ typedef typename binary_result_of_select<Func, ArgType0, ArgType1, FunctorType>::type type;
+};
+
+/** \internal In short, it computes int(sqrt(\a Y)) with \a Y an integer.
+ * Usage example: \code meta_sqrt<1023>::ret \endcode
+ */
+template<int Y,
+ int InfX = 0,
+ int SupX = ((Y==1) ? 1 : Y/2),
+ bool Done = ((SupX-InfX)<=1 ? true : ((SupX*SupX <= Y) && ((SupX+1)*(SupX+1) > Y))) >
+ // use ?: instead of || just to shut up a stupid gcc 4.3 warning
+class meta_sqrt
+{
+ enum {
+ MidX = (InfX+SupX)/2,
+ TakeInf = MidX*MidX > Y ? 1 : 0,
+ NewInf = int(TakeInf) ? InfX : int(MidX),
+ NewSup = int(TakeInf) ? int(MidX) : SupX
+ };
+ public:
+ enum { ret = meta_sqrt<Y,NewInf,NewSup>::ret };
+};
+
+template<int Y, int InfX, int SupX>
+class meta_sqrt<Y, InfX, SupX, true> { public: enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; };
+
+/** \internal determines whether the product of two numeric types is allowed and what the return type is */
+template<typename T, typename U> struct scalar_product_traits
+{
+ enum { Defined = 0 };
+};
+
+template<typename T> struct scalar_product_traits<T,T>
+{
+ enum {
+ // Cost = NumTraits<T>::MulCost,
+ Defined = 1
+ };
+ typedef T ReturnType;
+};
+
+template<typename T> struct scalar_product_traits<T, const T>
+{
+ enum {
+ // Cost = NumTraits<T>::MulCost,
+ Defined = 1
+ };
+ typedef T ReturnType;
+};
+
+template<typename T> struct scalar_product_traits<const T, T>
+{
+ enum {
+ // Cost = NumTraits<T>::MulCost,
+ Defined = 1
+ };
+ typedef T ReturnType;
+};
+
+template<typename T> struct scalar_product_traits<T,std::complex<T> >
+{
+ enum {
+ // Cost = 2*NumTraits<T>::MulCost,
+ Defined = 1
+ };
+ typedef std::complex<T> ReturnType;
+};
+
+template<typename T> struct scalar_product_traits<std::complex<T>, T>
+{
+ enum {
+ // Cost = 2*NumTraits<T>::MulCost,
+ Defined = 1
+ };
+ typedef std::complex<T> ReturnType;
+};
+
+// FIXME quick workaround around current limitation of result_of
+// template<typename Scalar, typename ArgType0, typename ArgType1>
+// struct result_of<scalar_product_op<Scalar>(ArgType0,ArgType1)> {
+// typedef typename scalar_product_traits<typename remove_all<ArgType0>::type, typename remove_all<ArgType1>::type>::ReturnType type;
+// };
+
+template<typename T> struct is_diagonal
+{ enum { ret = false }; };
+
+template<typename T> struct is_diagonal<DiagonalBase<T> >
+{ enum { ret = true }; };
+
+template<typename T> struct is_diagonal<DiagonalWrapper<T> >
+{ enum { ret = true }; };
+
+template<typename T, int S> struct is_diagonal<DiagonalMatrix<T,S> >
+{ enum { ret = true }; };
+
+} // end namespace internal
+
+namespace numext {
+
+#if defined(__CUDA_ARCH__)
+template<typename T> EIGEN_DEVICE_FUNC void swap(T &a, T &b) { T tmp = b; b = a; a = tmp; }
+#else
+template<typename T> EIGEN_STRONG_INLINE void swap(T &a, T &b) { std::swap(a,b); }
+#endif
+
+} // end namespace numext
+
+} // end namespace Eigen
+
+#endif // EIGEN_META_H
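
The compile-time square root above can be exercised directly; a minimal
sketch (assuming <Eigen/Core>, which pulls in both Meta.h and the static
assertion machinery):

    #include <Eigen/Core>

    int main()
    {
      // Binary search at compile time: 31*31 = 961 <= 1023 and 32*32 = 1024 > 1023,
      // so meta_sqrt<1023>::ret is 31, i.e. int(sqrt(1023)).
      enum { Root = Eigen::internal::meta_sqrt<1023>::ret };
      EIGEN_STATIC_ASSERT(Root == 31, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return 0;
    }
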
diff --git a/third_party/eigen3/Eigen/src/Core/util/ReenableStupidWarnings.h b/third_party/eigen3/Eigen/src/Core/util/ReenableStupidWarnings.h
new file mode 100644
index 0000000000..5ddfbd4aa6
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/Core/util/ReenableStupidWarnings.h
@@ -0,0 +1,14 @@
+#ifdef EIGEN_WARNINGS_DISABLED
+#undef EIGEN_WARNINGS_DISABLED
+
+#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+ #ifdef _MSC_VER
+ #pragma warning( pop )
+ #elif defined __INTEL_COMPILER
+ #pragma warning pop
+ #elif defined __clang__
+ #pragma clang diagnostic pop
+ #endif
+#endif
+
+#endif // EIGEN_WARNINGS_DISABLED
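
This header is the closing half of a push/pop pair; a sketch of the intended
sandwich inside an Eigen-style header (MyHeader.h is hypothetical):

    // MyHeader.h: silence third-party warnings for this header's body only.
    #include "src/Core/util/DisableStupidWarnings.h"   // pushes the warning state

    // ... declarations that would otherwise trigger MSVC/ICC/clang warnings ...

    // Pops the warning state again, unless
    // EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS is defined.
    #include "src/Core/util/ReenableStupidWarnings.h"
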
diff --git a/third_party/eigen3/Eigen/src/Core/util/StaticAssert.h b/third_party/eigen3/Eigen/src/Core/util/StaticAssert.h
new file mode 100644
index 0000000000..396e27b900
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/Core/util/StaticAssert.h
@@ -0,0 +1,206 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STATIC_ASSERT_H
+#define EIGEN_STATIC_ASSERT_H
+
+/* Some notes on Eigen's static assertion mechanism:
+ *
+ * - in EIGEN_STATIC_ASSERT(CONDITION,MSG) the parameter CONDITION must be a compile time boolean
+ * expression, and MSG an enum listed in struct internal::static_assertion<true>
+ *
+ * - define EIGEN_NO_STATIC_ASSERT to disable them (and save compilation time)
+ * in that case, the static assertion is converted to the following runtime assert:
+ * eigen_assert(CONDITION && "MSG")
+ *
+ * - currently EIGEN_STATIC_ASSERT can only be used in function scope
+ *
+ */
+
+#ifndef EIGEN_NO_STATIC_ASSERT
+
+ #if defined(__GXX_EXPERIMENTAL_CXX0X__) || (EIGEN_COMP_MSVC >= 1600)
+
+ // if native static_assert is enabled, let's use it
+ #define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG);
+
+ #else // not CXX0X
+
+ namespace Eigen {
+
+ namespace internal {
+
+ template<bool condition>
+ struct static_assertion {};
+
+ template<>
+ struct static_assertion<true>
+ {
+ enum {
+ YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX,
+ YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES,
+ YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES,
+ THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE,
+ THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE,
+ THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE,
+ YOU_MADE_A_PROGRAMMING_MISTAKE,
+ EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT,
+ EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE,
+ YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR,
+ YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR,
+ UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC,
+ THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES,
+ FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED,
+ NUMERIC_TYPE_MUST_BE_REAL,
+ COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED,
+ WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED,
+ THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE,
+ INVALID_MATRIX_PRODUCT,
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS,
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION,
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY,
+ THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES,
+ THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES,
+ INVALID_MATRIX_TEMPLATE_PARAMETERS,
+ INVALID_MATRIXBASE_TEMPLATE_PARAMETERS,
+ BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER,
+ THIS_METHOD_IS_ONLY_FOR_DIAGONAL_MATRIX,
+ THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE,
+ THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES,
+ YOU_ALREADY_SPECIFIED_THIS_STRIDE,
+ INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION,
+ THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD,
+ PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1,
+ THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS,
+ YOU_CANNOT_MIX_ARRAYS_AND_MATRICES,
+ YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION,
+ THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY,
+ YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT,
+ THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS,
+ THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL,
+ THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES,
+ YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED,
+ YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED,
+ THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE,
+ THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH,
+ OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG
+ };
+ };
+
+ } // end namespace internal
+
+ } // end namespace Eigen
+
+ // Specialized implementation for MSVC to avoid "conditional
+ // expression is constant" warnings. This implementation doesn't
+ // appear to work under GCC, hence the multiple implementations.
+ #if EIGEN_COMP_MSVC
+
+ #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
+ {Eigen::internal::static_assertion<bool(CONDITION)>::MSG;}
+
+ #else
+  // In some cases clang interprets bool(CONDITION) as a function declaration
+ #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
+ if (Eigen::internal::static_assertion<static_cast<bool>(CONDITION)>::MSG) {}
+
+ #endif
+
+ #endif // not CXX0X
+
+#else // EIGEN_NO_STATIC_ASSERT
+
+ #define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG);
+
+#endif // EIGEN_NO_STATIC_ASSERT
+
+
+// static assertion failing if the type \a TYPE is not a vector type
+#define EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) \
+ EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime, \
+ YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX)
+
+// static assertion failing if the type \a TYPE is not fixed-size
+#define EIGEN_STATIC_ASSERT_FIXED_SIZE(TYPE) \
+ EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime!=Eigen::Dynamic, \
+ YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR)
+
+// static assertion failing if the type \a TYPE is not dynamic-size
+#define EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(TYPE) \
+ EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime==Eigen::Dynamic, \
+ YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR)
+
+// static assertion failing if the type \a TYPE is not a vector type of the given size
+#define EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(TYPE, SIZE) \
+ EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime && TYPE::SizeAtCompileTime==SIZE, \
+ THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE)
+
+// static assertion failing if the type \a TYPE is not a matrix type of the given size
+#define EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(TYPE, ROWS, COLS) \
+ EIGEN_STATIC_ASSERT(TYPE::RowsAtCompileTime==ROWS && TYPE::ColsAtCompileTime==COLS, \
+ THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE)
+
+// static assertion failing if the two vector expression types are not compatible (same fixed-size or dynamic size)
+#define EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(TYPE0,TYPE1) \
+ EIGEN_STATIC_ASSERT( \
+ (int(TYPE0::SizeAtCompileTime)==Eigen::Dynamic \
+ || int(TYPE1::SizeAtCompileTime)==Eigen::Dynamic \
+ || int(TYPE0::SizeAtCompileTime)==int(TYPE1::SizeAtCompileTime)),\
+ YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES)
+
+#define EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
+ ( \
+ (int(TYPE0::SizeAtCompileTime)==0 && int(TYPE1::SizeAtCompileTime)==0) \
+ || (\
+ (int(TYPE0::RowsAtCompileTime)==Eigen::Dynamic \
+ || int(TYPE1::RowsAtCompileTime)==Eigen::Dynamic \
+ || int(TYPE0::RowsAtCompileTime)==int(TYPE1::RowsAtCompileTime)) \
+ && (int(TYPE0::ColsAtCompileTime)==Eigen::Dynamic \
+ || int(TYPE1::ColsAtCompileTime)==Eigen::Dynamic \
+ || int(TYPE0::ColsAtCompileTime)==int(TYPE1::ColsAtCompileTime))\
+ ) \
+ )
+
+#ifdef EIGEN2_SUPPORT
+ #define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \
+    eigen_assert(!NumTraits<TYPE>::IsInteger);
+#else
+ #define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \
+ EIGEN_STATIC_ASSERT(!NumTraits<TYPE>::IsInteger, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
+#endif
+
+
+// static assertion failing if it is guaranteed at compile-time that the two matrix expression types have different sizes
+#define EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
+ EIGEN_STATIC_ASSERT( \
+ EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1),\
+ YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES)
+
+#define EIGEN_STATIC_ASSERT_SIZE_1x1(TYPE) \
+ EIGEN_STATIC_ASSERT((TYPE::RowsAtCompileTime == 1 || TYPE::RowsAtCompileTime == Dynamic) && \
+ (TYPE::ColsAtCompileTime == 1 || TYPE::ColsAtCompileTime == Dynamic), \
+ THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS)
+
+#define EIGEN_STATIC_ASSERT_LVALUE(Derived) \
+ EIGEN_STATIC_ASSERT(internal::is_lvalue<Derived>::value, \
+ THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY)
+
+#define EIGEN_STATIC_ASSERT_ARRAYXPR(Derived) \
+ EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Derived>::XprKind, ArrayXpr>::value), \
+ THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES)
+
+#define EIGEN_STATIC_ASSERT_SAME_XPR_KIND(Derived1, Derived2) \
+ EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Derived1>::XprKind, \
+ typename internal::traits<Derived2>::XprKind \
+ >::value), \
+ YOU_CANNOT_MIX_ARRAYS_AND_MATRICES)
+
+
+#endif // EIGEN_STATIC_ASSERT_H
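
A minimal sketch of how these macros read at a call site (first_coeff is a
made-up function for illustration):

    #include <Eigen/Core>

    template<typename Derived>
    typename Derived::Scalar first_coeff(const Eigen::MatrixBase<Derived>& v)
    {
      // Fails to compile for matrix arguments, surfacing the readable enum name
      // YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX in the error message.
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
      return v.derived().coeff(0);
    }

    int main()
    {
      Eigen::Vector3d v(1.0, 2.0, 3.0);
      return first_coeff(v) == 1.0 ? 0 : 1;  // a Matrix3d argument would not compile
    }
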
diff --git a/third_party/eigen3/Eigen/src/Core/util/XprHelper.h b/third_party/eigen3/Eigen/src/Core/util/XprHelper.h
new file mode 100644
index 0000000000..13285909b4
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/Core/util/XprHelper.h
@@ -0,0 +1,481 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_XPRHELPER_H
+#define EIGEN_XPRHELPER_H
+
+// just a workaround because GCC seems to not really like empty structs
+// FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled
+// so currently we simply disable this optimization for gcc 4.3
+#if EIGEN_COMP_GNUC && !EIGEN_GNUC_AT(4,3)
+ #define EIGEN_EMPTY_STRUCT_CTOR(X) \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X() {} \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X(const X& ) {}
+#else
+ #define EIGEN_EMPTY_STRUCT_CTOR(X)
+#endif
+
+namespace Eigen {
+
+typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex;
+
+namespace internal {
+
+// classes inheriting no_assignment_operator don't generate a default operator=.
+class no_assignment_operator
+{
+ private:
+ no_assignment_operator& operator=(const no_assignment_operator&);
+};
+
+/** \internal return the index type with the largest number of bits */
+template<typename I1, typename I2>
+struct promote_index_type
+{
+ typedef typename conditional<(sizeof(I1)<sizeof(I2)), I2, I1>::type type;
+};
+
+/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
+ * can be accessed using value() and setValue().
+ * Otherwise, this class is an empty structure and value() just returns the template parameter Value.
+ */
+template<typename T, int Value> class variable_if_dynamic
+{
+ public:
+ EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamic)
+ EIGEN_DEVICE_FUNC explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
+ EIGEN_DEVICE_FUNC static T value() { return T(Value); }
+ EIGEN_DEVICE_FUNC void setValue(T) {}
+};
+
+template<typename T> class variable_if_dynamic<T, Dynamic>
+{
+ T m_value;
+ EIGEN_DEVICE_FUNC variable_if_dynamic() { eigen_assert(false); }
+ public:
+ EIGEN_DEVICE_FUNC explicit variable_if_dynamic(T value) : m_value(value) {}
+ EIGEN_DEVICE_FUNC T value() const { return m_value; }
+ EIGEN_DEVICE_FUNC void setValue(T value) { m_value = value; }
+};
+
+/** \internal like variable_if_dynamic but for DynamicIndex
+ */
+template<typename T, int Value> class variable_if_dynamicindex
+{
+ public:
+ EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)
+ EIGEN_DEVICE_FUNC explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
+ EIGEN_DEVICE_FUNC static T value() { return T(Value); }
+ EIGEN_DEVICE_FUNC void setValue(T) {}
+};
+
+template<typename T> class variable_if_dynamicindex<T, DynamicIndex>
+{
+ T m_value;
+ EIGEN_DEVICE_FUNC variable_if_dynamicindex() { eigen_assert(false); }
+ public:
+ EIGEN_DEVICE_FUNC explicit variable_if_dynamicindex(T value) : m_value(value) {}
+ EIGEN_DEVICE_FUNC T value() const { return m_value; }
+ EIGEN_DEVICE_FUNC void setValue(T value) { m_value = value; }
+};
+
+template<typename T> struct functor_traits
+{
+ enum
+ {
+ Cost = 10,
+ PacketAccess = false,
+ IsRepeatable = false
+ };
+};
+
+template<typename T> struct packet_traits;
+
+template<typename T> struct unpacket_traits
+{
+ typedef T type;
+ typedef T half;
+ enum {size=1};
+};
+
+template<typename _Scalar, int _Rows, int _Cols,
+ int _Options = AutoAlign |
+ ( (_Rows==1 && _Cols!=1) ? RowMajor
+ : (_Cols==1 && _Rows!=1) ? ColMajor
+ : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
+ int _MaxRows = _Rows,
+ int _MaxCols = _Cols
+> class make_proper_matrix_type
+{
+ enum {
+ IsColVector = _Cols==1 && _Rows!=1,
+ IsRowVector = _Rows==1 && _Cols!=1,
+ Options = IsColVector ? (_Options | ColMajor) & ~RowMajor
+ : IsRowVector ? (_Options | RowMajor) & ~ColMajor
+ : _Options
+ };
+ public:
+ typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type;
+};
+
+template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+class compute_matrix_flags
+{
+ enum {
+ row_major_bit = Options&RowMajor ? RowMajorBit : 0,
+ is_dynamic_size_storage = MaxRows==Dynamic || MaxCols==Dynamic,
+
+ aligned_bit =
+ (
+ ((Options&DontAlign)==0)
+ && (
+#if EIGEN_ALIGN_STATICALLY
+ ((!is_dynamic_size_storage) && (((MaxCols*MaxRows*int(sizeof(Scalar))) % EIGEN_ALIGN_BYTES) == 0))
+#else
+ 0
+#endif
+
+ ||
+
+#if EIGEN_ALIGN
+ is_dynamic_size_storage
+#else
+ 0
+#endif
+
+ )
+ ) ? AlignedBit : 0,
+ packet_access_bit = packet_traits<Scalar>::Vectorizable && aligned_bit ? PacketAccessBit : 0
+ };
+
+ public:
+ enum { ret = LinearAccessBit | LvalueBit | DirectAccessBit | NestByRefBit | packet_access_bit | row_major_bit | aligned_bit };
+};
+
+template<int _Rows, int _Cols> struct size_at_compile_time
+{
+ enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
+};
+
+/* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type,
+ * whereas eval is a const reference in the case of a matrix
+ */
+
+template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_matrix_type;
+template<typename T, typename BaseClassType> struct plain_matrix_type_dense;
+template<typename T> struct plain_matrix_type<T,Dense>
+{
+ typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind>::type type;
+};
+
+template<typename T> struct plain_matrix_type_dense<T,MatrixXpr>
+{
+ typedef Matrix<typename traits<T>::Scalar,
+ traits<T>::RowsAtCompileTime,
+ traits<T>::ColsAtCompileTime,
+ AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
+ traits<T>::MaxRowsAtCompileTime,
+ traits<T>::MaxColsAtCompileTime
+ > type;
+};
+
+template<typename T> struct plain_matrix_type_dense<T,ArrayXpr>
+{
+ typedef Array<typename traits<T>::Scalar,
+ traits<T>::RowsAtCompileTime,
+ traits<T>::ColsAtCompileTime,
+ AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
+ traits<T>::MaxRowsAtCompileTime,
+ traits<T>::MaxColsAtCompileTime
+ > type;
+};
+
+/* eval : the return type of eval(). For matrices, this is just a const reference
+ * in order to avoid a useless copy
+ */
+
+template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct eval;
+
+template<typename T> struct eval<T,Dense>
+{
+ typedef typename plain_matrix_type<T>::type type;
+// typedef typename T::PlainObject type;
+// typedef T::Matrix<typename traits<T>::Scalar,
+// traits<T>::RowsAtCompileTime,
+// traits<T>::ColsAtCompileTime,
+// AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
+// traits<T>::MaxRowsAtCompileTime,
+// traits<T>::MaxColsAtCompileTime
+// > type;
+};
+
+// for matrices, no need to evaluate, just use a const reference to avoid a useless copy
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct eval<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
+{
+ typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
+};
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct eval<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
+{
+ typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
+};
+
+
+
+/* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major
+ */
+template<typename T> struct plain_matrix_type_column_major
+{
+ enum { Rows = traits<T>::RowsAtCompileTime,
+ Cols = traits<T>::ColsAtCompileTime,
+ MaxRows = traits<T>::MaxRowsAtCompileTime,
+ MaxCols = traits<T>::MaxColsAtCompileTime
+ };
+ typedef Matrix<typename traits<T>::Scalar,
+ Rows,
+ Cols,
+ (MaxRows==1&&MaxCols!=1) ? RowMajor : ColMajor,
+ MaxRows,
+ MaxCols
+ > type;
+};
+
+/* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major
+ */
+template<typename T> struct plain_matrix_type_row_major
+{
+ enum { Rows = traits<T>::RowsAtCompileTime,
+ Cols = traits<T>::ColsAtCompileTime,
+ MaxRows = traits<T>::MaxRowsAtCompileTime,
+ MaxCols = traits<T>::MaxColsAtCompileTime
+ };
+ typedef Matrix<typename traits<T>::Scalar,
+ Rows,
+ Cols,
+ (MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor,
+ MaxRows,
+ MaxCols
+ > type;
+};
+
+// we should be able to get rid of this one too
+template<typename T> struct must_nest_by_value { enum { ret = false }; };
+
+/** \internal The reference selector for template expressions. The idea is that we don't
+ * need to use references for expressions since they are lightweight proxy
+ * objects which should generate no copying overhead. */
+template <typename T>
+struct ref_selector
+{
+ typedef typename conditional<
+ bool(traits<T>::Flags & NestByRefBit),
+ T const&,
+ const T
+ >::type type;
+};
+
+/** \internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */
+template<typename T1, typename T2>
+struct transfer_constness
+{
+ typedef typename conditional<
+ bool(internal::is_const<T1>::value),
+ typename internal::add_const_on_value_type<T2>::type,
+ T2
+ >::type type;
+};
+
+/** \internal Determines how a given expression should be nested into another one.
+ * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be
+ * nested into the bigger product expression. The choice is between nesting the expression b+c as-is, or
+ * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is
+ * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes
+ * many coefficient accesses in the nested expressions -- as is the case with matrix product for example.
+ *
+ * \param T the type of the expression being nested
+ * \param n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.
+ *
+ * Note that if no evaluation occurs, then the constness of T is preserved.
+ *
+ * Example. Suppose that a, b, and c are of type Matrix3d. The user forms the expression a*(b+c).
+ * b+c is an expression "sum of matrices", which we will denote by S. In order to determine how to nest it,
+ * the Product expression uses: nested<S, 3>::type, which turns out to be Matrix3d because the internal logic of
+ * nested determined that in this case it was better to evaluate the expression b+c into a temporary. On the other hand,
+ * since a is of type Matrix3d, the Product expression nests it as nested<Matrix3d, 3>::type, which turns out to be
+ * const Matrix3d&, because the internal logic of nested determined that since a was already a matrix, there was no point
+ * in copying it into another matrix.
+ */
+template<typename T, int n=1, typename PlainObject = typename eval<T>::type> struct nested
+{
+ enum {
+    // for the purpose of this test, to keep it reasonably simple, we arbitrarily pick a fixed stand-in value for Dynamic.
+ // the choice of 10000 makes it larger than any practical fixed value and even most dynamic values.
+ // in extreme cases where these assumptions would be wrong, we would still at worst suffer performance issues
+ // (poor choice of temporaries).
+ // it's important that this value can still be squared without integer overflowing.
+ DynamicAsInteger = 10000,
+ ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
+ ScalarReadCostAsInteger = ScalarReadCost == Dynamic ? int(DynamicAsInteger) : int(ScalarReadCost),
+ CoeffReadCost = traits<T>::CoeffReadCost,
+ CoeffReadCostAsInteger = CoeffReadCost == Dynamic ? int(DynamicAsInteger) : int(CoeffReadCost),
+ NAsInteger = n == Dynamic ? int(DynamicAsInteger) : n,
+ CostEvalAsInteger = (NAsInteger+1) * ScalarReadCostAsInteger + CoeffReadCostAsInteger,
+ CostNoEvalAsInteger = NAsInteger * CoeffReadCostAsInteger
+ };
+
+ typedef typename conditional<
+ ( (int(traits<T>::Flags) & EvalBeforeNestingBit) ||
+ int(CostEvalAsInteger) < int(CostNoEvalAsInteger)
+ ),
+ PlainObject,
+ typename ref_selector<T>::type
+ >::type type;
+};
+
+template<typename T>
+EIGEN_DEVICE_FUNC
+T* const_cast_ptr(const T* ptr)
+{
+ return const_cast<T*>(ptr);
+}
+
+template<typename Derived, typename XprKind = typename traits<Derived>::XprKind>
+struct dense_xpr_base
+{
+ /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */
+};
+
+template<typename Derived>
+struct dense_xpr_base<Derived, MatrixXpr>
+{
+ typedef MatrixBase<Derived> type;
+};
+
+template<typename Derived>
+struct dense_xpr_base<Derived, ArrayXpr>
+{
+ typedef ArrayBase<Derived> type;
+};
+
+/** \internal Helper base class to add a scalar multiple operator
+ * overloads for complex types */
+template<typename Derived,typename Scalar,typename OtherScalar,
+ bool EnableIt = !is_same<Scalar,OtherScalar>::value >
+struct special_scalar_op_base : public DenseCoeffsBase<Derived>
+{
+ // dummy operator* so that the
+ // "using special_scalar_op_base::operator*" compiles
+ void operator*() const;
+};
+
+template<typename Derived,typename Scalar,typename OtherScalar>
+struct special_scalar_op_base<Derived,Scalar,OtherScalar,true> : public DenseCoeffsBase<Derived>
+{
+ const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
+ operator*(const OtherScalar& scalar) const
+ {
+ return CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
+ (*static_cast<const Derived*>(this), scalar_multiple2_op<Scalar,OtherScalar>(scalar));
+ }
+
+ inline friend const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
+ operator*(const OtherScalar& scalar, const Derived& matrix)
+ { return static_cast<const special_scalar_op_base&>(matrix).operator*(scalar); }
+};
+
+template<typename XprType, typename CastType> struct cast_return_type
+{
+ typedef typename XprType::Scalar CurrentScalarType;
+ typedef typename remove_all<CastType>::type _CastType;
+ typedef typename _CastType::Scalar NewScalarType;
+ typedef typename conditional<is_same<CurrentScalarType,NewScalarType>::value,
+ const XprType&,CastType>::type type;
+};
+
+template <typename A, typename B> struct promote_storage_type;
+
+template <typename A> struct promote_storage_type<A,A>
+{
+ typedef A ret;
+};
+template <typename A> struct promote_storage_type<A, const A>
+{
+ typedef A ret;
+};
+template <typename A> struct promote_storage_type<const A, A>
+{
+ typedef A ret;
+};
+
+
+
+/** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type.
+ * \param Scalar optional parameter that allows passing a different scalar type than the one of the MatrixType.
+ */
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_row_type
+{
+ typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,
+ ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
+ typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,
+ ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;
+
+ typedef typename conditional<
+ is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+ MatrixRowType,
+ ArrayRowType
+ >::type type;
+};
+
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_col_type
+{
+ typedef Matrix<Scalar, ExpressionType::RowsAtCompileTime, 1,
+ ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> MatrixColType;
+ typedef Array<Scalar, ExpressionType::RowsAtCompileTime, 1,
+ ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> ArrayColType;
+
+ typedef typename conditional<
+ is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+ MatrixColType,
+ ArrayColType
+ >::type type;
+};
+
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_diag_type
+{
+ enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
+ max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)
+ };
+ typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;
+ typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;
+
+ typedef typename conditional<
+ is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+ MatrixDiagType,
+ ArrayDiagType
+ >::type type;
+};
+
+template<typename ExpressionType>
+struct is_lvalue
+{
+ enum { value = !bool(is_const<ExpressionType>::value) &&
+ bool(traits<ExpressionType>::Flags & LvalueBit) };
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_XPRHELPER_H
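
To make the fixed/dynamic split of variable_if_dynamic concrete, a minimal
sketch (the values are arbitrary):

    #include <Eigen/Core>
    #include <cassert>

    int main()
    {
      using Eigen::internal::variable_if_dynamic;

      // Fixed case: an empty struct; value() is a compile-time constant and
      // the constructor merely asserts the runtime argument agrees with it.
      variable_if_dynamic<int, 3> fixed(3);

      // Dynamic case: actually stores the value and allows updating it.
      variable_if_dynamic<int, Eigen::Dynamic> dyn(7);
      dyn.setValue(8);

      assert(fixed.value() == 3 && dyn.value() == 8);
      return 0;
    }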