-rw-r--r--  CMakeLists.txt | 10
-rw-r--r--  Eigen/Core | 9
-rw-r--r--  Eigen/Eigen2Support | 1
-rw-r--r--  Eigen/StdVector | 39
-rw-r--r--  Eigen/src/Array/Array.h | 2
-rw-r--r--  Eigen/src/Array/ArrayWrapper.h | 19
-rw-r--r--  Eigen/src/Array/GlobalFunctions.h | 55
-rw-r--r--  Eigen/src/Array/Random.h | 2
-rw-r--r--  Eigen/src/Array/Replicate.h | 6
-rw-r--r--  Eigen/src/Array/Reverse.h | 6
-rw-r--r--  Eigen/src/Array/Select.h | 10
-rw-r--r--  Eigen/src/Array/VectorwiseOp.h | 4
-rw-r--r--  Eigen/src/Cholesky/LDLT.h | 2
-rw-r--r--  Eigen/src/Core/AnyMatrixBase.h | 6
-rw-r--r--  Eigen/src/Core/Assign.h | 31
-rw-r--r--  Eigen/src/Core/BandMatrix.h | 3
-rw-r--r--  Eigen/src/Core/Block.h | 4
-rw-r--r--  Eigen/src/Core/CwiseBinaryOp.h | 7
-rw-r--r--  Eigen/src/Core/CwiseNullaryOp.h | 78
-rw-r--r--  Eigen/src/Core/CwiseUnaryOp.h | 32
-rw-r--r--  Eigen/src/Core/CwiseUnaryView.h | 28
-rw-r--r--  Eigen/src/Core/DenseBase.h | 40
-rw-r--r--  Eigen/src/Core/DenseStorageBase.h | 11
-rw-r--r--  Eigen/src/Core/Diagonal.h | 5
-rw-r--r--  Eigen/src/Core/DiagonalMatrix.h | 4
-rw-r--r--  Eigen/src/Core/DiagonalProduct.h | 3
-rw-r--r--  Eigen/src/Core/ExpressionMaker.h | 55
-rw-r--r--  Eigen/src/Core/Flagged.h (renamed from Eigen/src/Eigen2Support/Flagged.h) | 59
-rw-r--r--  Eigen/src/Core/ForceAlignedAccess.h | 7
-rw-r--r--  Eigen/src/Core/Functors.h | 71
-rw-r--r--  Eigen/src/Core/GenericPacketMath.h | 4
-rw-r--r--  Eigen/src/Core/IO.h | 13
-rw-r--r--  Eigen/src/Core/Map.h | 2
-rw-r--r--  Eigen/src/Core/MapBase.h | 6
-rw-r--r--  Eigen/src/Core/MathFunctions.h | 61
-rw-r--r--  Eigen/src/Core/Matrix.h | 8
-rw-r--r--  Eigen/src/Core/MatrixBase.h | 26
-rw-r--r--  Eigen/src/Core/MatrixStorage.h | 18
-rw-r--r--  Eigen/src/Core/Minor.h | 3
-rw-r--r--  Eigen/src/Core/NestByValue.h | 9
-rw-r--r--  Eigen/src/Core/NoAlias.h | 8
-rw-r--r--  Eigen/src/Core/NumTraits.h | 36
-rw-r--r--  Eigen/src/Core/Product.h | 60
-rw-r--r--  Eigen/src/Core/ProductBase.h | 55
-rw-r--r--  Eigen/src/Core/Redux.h | 17
-rw-r--r--  Eigen/src/Core/ReturnByValue.h | 22
-rw-r--r--  Eigen/src/Core/SelfCwiseBinaryOp.h | 4
-rw-r--r--  Eigen/src/Core/SolveTriangular.h | 2
-rw-r--r--  Eigen/src/Core/StableNorm.h | 4
-rw-r--r--  Eigen/src/Core/Swap.h | 2
-rw-r--r--  Eigen/src/Core/Transpose.h | 41
-rw-r--r--  Eigen/src/Core/TriangularMatrix.h | 16
-rw-r--r--  Eigen/src/Core/VectorBlock.h | 4
-rw-r--r--  Eigen/src/Core/arch/SSE/PacketMath.h | 4
-rw-r--r--  Eigen/src/Core/products/CoeffBasedProduct.h (renamed from Eigen/src/Core/products/GeneralUnrolled.h) | 74
-rw-r--r--  Eigen/src/Core/util/BlasUtil.h | 18
-rw-r--r--  Eigen/src/Core/util/Constants.h | 13
-rw-r--r--  Eigen/src/Core/util/ForwardDeclarations.h | 4
-rw-r--r--  Eigen/src/Core/util/Macros.h | 41
-rw-r--r--  Eigen/src/Core/util/Memory.h | 10
-rw-r--r--  Eigen/src/Core/util/StaticAssert.h | 1
-rw-r--r--  Eigen/src/Core/util/XprHelper.h | 26
-rw-r--r--  Eigen/src/Eigen2Support/Lazy.h | 82
-rw-r--r--  Eigen/src/Eigenvalues/ComplexEigenSolver.h | 2
-rw-r--r--  Eigen/src/Eigenvalues/ComplexSchur.h | 2
-rw-r--r--  Eigen/src/Geometry/AlignedBox.h | 218
-rw-r--r--  Eigen/src/Geometry/AngleAxis.h | 14
-rw-r--r--  Eigen/src/Geometry/EulerAngles.h | 2
-rw-r--r--  Eigen/src/Geometry/Homogeneous.h | 5
-rw-r--r--  Eigen/src/Geometry/Hyperplane.h | 2
-rw-r--r--  Eigen/src/Geometry/ParametrizedLine.h | 2
-rw-r--r--  Eigen/src/Geometry/Quaternion.h | 11
-rw-r--r--  Eigen/src/Geometry/Rotation2D.h | 2
-rw-r--r--  Eigen/src/Geometry/Scaling.h | 2
-rw-r--r--  Eigen/src/Geometry/Transform.h | 11
-rw-r--r--  Eigen/src/Geometry/Translation.h | 2
-rw-r--r--  Eigen/src/Geometry/Umeyama.h | 6
-rw-r--r--  Eigen/src/Householder/Householder.h | 6
-rw-r--r--  Eigen/src/LU/Determinant.h | 2
-rw-r--r--  Eigen/src/LU/FullPivLU.h | 8
-rw-r--r--  Eigen/src/LU/PartialPivLU.h | 2
-rw-r--r--  Eigen/src/LU/arch/Inverse_SSE.h | 4
-rw-r--r--  Eigen/src/QR/ColPivHouseholderQR.h | 6
-rw-r--r--  Eigen/src/QR/FullPivHouseholderQR.h | 6
-rw-r--r--  Eigen/src/SVD/JacobiSVD.h | 2
-rw-r--r--  Eigen/src/SVD/SVD.h | 4
-rw-r--r--  Eigen/src/Sparse/AmbiVector.h | 2
-rw-r--r--  Eigen/src/Sparse/CompressedStorage.h | 2
-rw-r--r--  Eigen/src/Sparse/DynamicSparseMatrix.h | 11
-rw-r--r--  Eigen/src/Sparse/SparseBlock.h | 97
-rw-r--r--  Eigen/src/Sparse/SparseCwiseUnaryOp.h | 6
-rw-r--r--  Eigen/src/Sparse/SparseExpressionMaker.h | 42
-rw-r--r--  Eigen/src/Sparse/SparseLDLT.h | 4
-rw-r--r--  Eigen/src/Sparse/SparseLLT.h | 4
-rw-r--r--  Eigen/src/Sparse/SparseLU.h | 4
-rw-r--r--  Eigen/src/Sparse/SparseMatrix.h | 11
-rw-r--r--  Eigen/src/Sparse/SparseMatrixBase.h | 26
-rw-r--r--  Eigen/src/Sparse/SparseProduct.h | 5
-rw-r--r--  Eigen/src/Sparse/SparseVector.h | 11
-rw-r--r--  Eigen/src/plugins/MatrixCwiseUnaryOps.h | 2
-rw-r--r--  bench/BenchSparseUtil.h | 14
-rw-r--r--  bench/sparse_product.cpp | 147
-rw-r--r--  cmake/EigenTesting.cmake | 14
-rw-r--r--  doc/A05_PortingFrom2To3.dox | 8
-rw-r--r--  doc/D01_StlContainers.dox | 15
-rw-r--r--  doc/snippets/DenseBase_LinSpaced.cpp | 2
-rw-r--r--  doc/snippets/DenseBase_LinSpaced_seq.cpp | 2
-rw-r--r--  doc/snippets/DenseBase_setLinSpaced.cpp | 3
-rw-r--r--  scripts/eigen_gen_docs | 10
-rw-r--r--  test/CMakeLists.txt | 7
-rw-r--r--  test/array.cpp | 30
-rw-r--r--  test/first_aligned.cpp | 6
-rw-r--r--  test/geo_alignedbox.cpp | 122
-rw-r--r--  test/geo_hyperplane.cpp | 2
-rw-r--r--  test/geo_quaternion.cpp | 3
-rw-r--r--  test/geo_transformations.cpp | 4
-rw-r--r--  test/inverse.cpp | 1
-rw-r--r--  test/lu.cpp | 10
-rw-r--r--  test/map.cpp | 46
-rw-r--r--  test/nesting_ops.cpp | 56
-rw-r--r--  test/nullary.cpp | 54
-rw-r--r--  test/prec_inverse_4x4.cpp | 6
-rw-r--r--  test/product.h | 2
-rw-r--r--  test/product_notemporary.cpp | 24
-rw-r--r--  test/redux.cpp | 13
-rw-r--r--  test/stdvector_overload.cpp | 176
-rw-r--r--  test/submatrices.cpp | 11
-rw-r--r--  unsupported/Eigen/AlignedVector3 | 5
-rw-r--r--  unsupported/Eigen/AutoDiff | 2
-rw-r--r--  unsupported/Eigen/FFT | 4
-rw-r--r--  unsupported/Eigen/NonLinearOptimization | 22
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h | 230
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h | 321
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h | 14
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h | 362
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h | 357
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/chkder.h | 27
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/covar.h | 41
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/dogleg.h | 70
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/fdjac1.h | 30
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/lmpar.h | 143
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/qform.h | 89
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/qrfac.h | 131
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/qrsolv.h | 55
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h | 66
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/r1updt.h | 225
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/rwupdt.h | 103
-rw-r--r--  unsupported/Eigen/src/NumericalDiff/NumericalDiff.h | 2
-rw-r--r--  unsupported/doc/examples/MatrixExponential.cpp | 3
-rw-r--r--  unsupported/doc/examples/MatrixFunction.cpp | 7
-rw-r--r--  unsupported/doc/examples/MatrixSine.cpp | 6
-rw-r--r--  unsupported/doc/examples/MatrixSinh.cpp | 6
-rw-r--r--  unsupported/test/FFT.cpp | 243
-rw-r--r--  unsupported/test/FFTW.cpp | 9
-rw-r--r--  unsupported/test/NonLinearOptimization.cpp | 84
-rw-r--r--  unsupported/test/matrix_exponential.cpp | 13
-rw-r--r--  unsupported/test/matrix_function.cpp | 126
157 files changed, 2919 insertions, 2516 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a7d4089c0..f4c2973d5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -56,14 +56,18 @@ if(CMAKE_COMPILER_IS_GNUCXX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wnon-virtual-dtor -Wno-long-long -ansi -Wundef -Wcast-align -Wchar-subscripts -Wall -W -Wpointer-arith -Wwrite-strings -Wformat-security -fexceptions -fno-check-new -fno-common -fstrict-aliasing")
set(CMAKE_CXX_FLAGS_DEBUG "-g3")
set(CMAKE_CXX_FLAGS_RELEASE "-g0 -O2")
+
+ check_cxx_compiler_flag("-Wno-variadic-macros" COMPILER_SUPPORT_WNOVARIADICMACRO)
+ if(COMPILER_SUPPORT_WNOVARIADICMACRO)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-variadic-macros")
+ endif()
+
check_cxx_compiler_flag("-Wextra" COMPILER_SUPPORT_WEXTRA)
if(COMPILER_SUPPORT_WEXTRA)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra")
endif()
- if(NOT EIGEN_TEST_LIB)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pedantic")
- endif()
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pedantic")
option(EIGEN_TEST_SSE2 "Enable/Disable SSE2 in tests/examples" OFF)
if(EIGEN_TEST_SSE2)
diff --git a/Eigen/Core b/Eigen/Core
index 2a9ee08e8..18b0fafa9 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -121,6 +121,10 @@
namespace Eigen {
+// we use size_t frequently and we'll never remember to prepend it with std:: every time just to
+// ensure QNX/QCC support
+using std::size_t;
+
/** \defgroup Core_Module Core module
* This is the main module of Eigen providing dense matrix and vector support
* (both fixed and dynamic size) with all the features corresponding to a BLAS library
@@ -199,12 +203,13 @@ struct Dense {};
#include "src/Core/IO.h"
#include "src/Core/Swap.h"
#include "src/Core/CommaInitializer.h"
+#include "src/Core/Flagged.h"
#include "src/Core/ProductBase.h"
#include "src/Core/Product.h"
#include "src/Core/TriangularMatrix.h"
#include "src/Core/SelfAdjointView.h"
#include "src/Core/SolveTriangular.h"
-#include "src/Core/products/GeneralUnrolled.h"
+#include "src/Core/products/CoeffBasedProduct.h"
#include "src/Core/products/GeneralBlockPanelKernel.h"
#include "src/Core/products/GeneralMatrixVector.h"
#include "src/Core/products/GeneralMatrixMatrix.h"
@@ -253,6 +258,8 @@ struct Dense {};
} // namespace Eigen
+#include "src/Array/GlobalFunctions.h"
+
#include "src/Core/util/EnableMSVCWarnings.h"
#ifdef EIGEN2_SUPPORT
diff --git a/Eigen/Eigen2Support b/Eigen/Eigen2Support
index 9e3bff68f..bd6306aff 100644
--- a/Eigen/Eigen2Support
+++ b/Eigen/Eigen2Support
@@ -42,7 +42,6 @@ namespace Eigen {
*
*/
-#include "src/Eigen2Support/Flagged.h"
#include "src/Eigen2Support/Lazy.h"
#include "src/Eigen2Support/Cwise.h"
#include "src/Eigen2Support/CwiseOperators.h"
diff --git a/Eigen/StdVector b/Eigen/StdVector
index b6dbde8a2..04e356785 100644
--- a/Eigen/StdVector
+++ b/Eigen/StdVector
@@ -29,6 +29,45 @@
#include "Core"
#include <vector>
+// Define the explicit instantiation (e.g. necessary for the Intel compiler)
+#if defined(__INTEL_COMPILER) || defined(__GNUC__)
+ #define EIGEN_EXPLICIT_STL_VECTOR_INSTANTIATION(...) template class std::vector<__VA_ARGS__, Eigen::aligned_allocator<__VA_ARGS__> >;
+#else
+ #define EIGEN_EXPLICIT_STL_VECTOR_INSTANTIATION(...)
+#endif
+
+/**
+ * This section contains a convenience MACRO which allows an easy specialization of
+ * std::vector such that for data types with alignment issues the correct allocator
+ * is used automatically.
+ */
+#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) \
+EIGEN_EXPLICIT_STL_VECTOR_INSTANTIATION(__VA_ARGS__) \
+namespace std \
+{ \
+ template<typename _Ay> \
+ class vector<__VA_ARGS__, _Ay> \
+ : public vector<__VA_ARGS__, Eigen::aligned_allocator<__VA_ARGS__> > \
+ { \
+ typedef vector<__VA_ARGS__, Eigen::aligned_allocator<__VA_ARGS__> > vector_base; \
+ public: \
+ typedef __VA_ARGS__ value_type; \
+ typedef typename vector_base::allocator_type allocator_type; \
+ typedef typename vector_base::size_type size_type; \
+ typedef typename vector_base::iterator iterator; \
+ explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \
+ template<typename InputIterator> \
+ vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \
+ vector(const vector& c) : vector_base(c) {} \
+ explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
+ vector(iterator start, iterator end) : vector_base(start, end) {} \
+ vector& operator=(const vector& x) { \
+ vector_base::operator=(x); \
+ return *this; \
+ } \
+ }; \
+}
+
namespace Eigen {
// This one is needed to prevent reimplementing the whole std::vector.
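
A minimal usage sketch of the new EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION macro (not part of the patch; the choice of Eigen::Vector4f and the placement of the macro invocation at global scope are illustrative assumptions):

#include <Eigen/StdVector>

// One invocation per fixed-size vectorizable type, at global scope, so that
// std::vector<Eigen::Vector4f> transparently derives from the vector that
// uses Eigen::aligned_allocator.
EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Eigen::Vector4f)

int main()
{
  // The default allocator for this vector is now the aligned one, so the
  // 16-byte alignment required for vectorized Vector4f is preserved.
  std::vector<Eigen::Vector4f> v(10, Eigen::Vector4f::Zero());
  v.push_back(Eigen::Vector4f::Ones());
  return 0;
}
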
diff --git a/Eigen/src/Array/Array.h b/Eigen/src/Array/Array.h
index b8328417a..ceef71afd 100644
--- a/Eigen/src/Array/Array.h
+++ b/Eigen/src/Array/Array.h
@@ -38,7 +38,7 @@ class Array
public:
typedef DenseStorageBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Eigen::ArrayBase, _Options> Base;
- _EIGEN_DENSE_PUBLIC_INTERFACE(Array)
+ EIGEN_DENSE_PUBLIC_INTERFACE(Array)
enum { Options = _Options };
typedef typename Base::PlainMatrixType PlainMatrixType;
diff --git a/Eigen/src/Array/ArrayWrapper.h b/Eigen/src/Array/ArrayWrapper.h
index b62d66d8c..75bc33770 100644
--- a/Eigen/src/Array/ArrayWrapper.h
+++ b/Eigen/src/Array/ArrayWrapper.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -36,7 +36,7 @@
*/
template<typename ExpressionType>
struct ei_traits<ArrayWrapper<ExpressionType> >
- : public ei_traits<ExpressionType>
+ : public ei_traits<typename ei_cleantype<typename ExpressionType::Nested>::type >
{
typedef DenseStorageArray DenseStorageType;
};
@@ -46,9 +46,11 @@ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
{
public:
typedef ArrayBase<ArrayWrapper> Base;
- _EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper)
+ EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper)
+ typedef typename ei_nested<ExpressionType>::type NestedExpressionType;
+
inline ArrayWrapper(const ExpressionType& matrix) : m_expression(matrix) {}
inline int rows() const { return m_expression.rows(); }
@@ -103,7 +105,7 @@ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
inline void evalTo(Dest& dst) const { dst = m_expression; }
protected:
- const ExpressionType& m_expression;
+ const NestedExpressionType m_expression;
};
/** \class MatrixWrapper
@@ -118,7 +120,7 @@ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
template<typename ExpressionType>
struct ei_traits<MatrixWrapper<ExpressionType> >
- : public ei_traits<ExpressionType>
+ : public ei_traits<typename ei_cleantype<typename ExpressionType::Nested>::type >
{
typedef DenseStorageMatrix DenseStorageType;
};
@@ -127,9 +129,12 @@ template<typename ExpressionType>
class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(MatrixWrapper)
+ typedef MatrixBase<MatrixWrapper<ExpressionType> > Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper);
+ typedef typename ei_nested<ExpressionType>::type NestedExpressionType;
+
inline MatrixWrapper(const ExpressionType& matrix) : m_expression(matrix) {}
inline int rows() const { return m_expression.rows(); }
@@ -181,7 +186,7 @@ class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
}
protected:
- const ExpressionType& m_expression;
+ const NestedExpressionType& m_expression;
};
#endif // EIGEN_ARRAYWRAPPER_H
diff --git a/Eigen/src/Array/GlobalFunctions.h b/Eigen/src/Array/GlobalFunctions.h
new file mode 100644
index 000000000..33f00c2ba
--- /dev/null
+++ b/Eigen/src/Array/GlobalFunctions.h
@@ -0,0 +1,55 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_GLOBAL_FUNCTIONS_H
+#define EIGEN_GLOBAL_FUNCTIONS_H
+
+#define EIGEN_ARRAY_DECLARARE_GLOBAL_UNARY(NAME,FUNCTOR) \
+ template<typename Derived> \
+ inline const Eigen::CwiseUnaryOp<Eigen::FUNCTOR<typename Derived::Scalar>, Derived> \
+ NAME(const Eigen::ArrayBase<Derived>& x) { \
+ return x.derived(); \
+ }
+
+namespace std
+{
+ EIGEN_ARRAY_DECLARARE_GLOBAL_UNARY(sin,ei_scalar_sin_op)
+ EIGEN_ARRAY_DECLARARE_GLOBAL_UNARY(cos,ei_scalar_cos_op)
+ EIGEN_ARRAY_DECLARARE_GLOBAL_UNARY(exp,ei_scalar_exp_op)
+ EIGEN_ARRAY_DECLARARE_GLOBAL_UNARY(log,ei_scalar_log_op)
+ EIGEN_ARRAY_DECLARARE_GLOBAL_UNARY(abs,ei_scalar_abs_op)
+ EIGEN_ARRAY_DECLARARE_GLOBAL_UNARY(sqrt,ei_scalar_sqrt_op)
+}
+
+namespace Eigen
+{
+ EIGEN_ARRAY_DECLARARE_GLOBAL_UNARY(ei_sin,ei_scalar_sin_op)
+ EIGEN_ARRAY_DECLARARE_GLOBAL_UNARY(ei_cos,ei_scalar_cos_op)
+ EIGEN_ARRAY_DECLARARE_GLOBAL_UNARY(ei_exp,ei_scalar_exp_op)
+ EIGEN_ARRAY_DECLARARE_GLOBAL_UNARY(ei_log,ei_scalar_log_op)
+ EIGEN_ARRAY_DECLARARE_GLOBAL_UNARY(ei_abs,ei_scalar_abs_op)
+ EIGEN_ARRAY_DECLARARE_GLOBAL_UNARY(ei_sqrt,ei_scalar_sqrt_op)
+}
+
+#endif // EIGEN_GLOBAL_FUNCTIONS_H
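
Since the new header only declares thin forwarding overloads, a short sketch of what they enable may help (illustrative only, not part of the patch; the explicit Array<float,Dynamic,1> spelling avoids relying on typedefs, and <Eigen/Core> is assumed to provide the Array class at this revision, given that it now includes GlobalFunctions.h per the hunk above):

#include <Eigen/Core>

int main()
{
  Eigen::Array<float, Eigen::Dynamic, 1> a(4);
  a << 0.0f, 0.5f, 1.0f, 1.5f;

  // std::sin resolves to the coefficient-wise overload declared above in
  // namespace std and returns a CwiseUnaryOp expression...
  Eigen::Array<float, Eigen::Dynamic, 1> s = std::sin(a);

  // ...and the same operations are available in namespace Eigen under the
  // internal-style names.
  Eigen::Array<float, Eigen::Dynamic, 1> e = Eigen::ei_exp(a);
  return 0;
}
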
diff --git a/Eigen/src/Array/Random.h b/Eigen/src/Array/Random.h
index 97ca0fba3..e1f4aa7ca 100644
--- a/Eigen/src/Array/Random.h
+++ b/Eigen/src/Array/Random.h
@@ -27,7 +27,7 @@
template<typename Scalar> struct ei_scalar_random_op {
EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_random_op)
- inline const Scalar operator() (int, int) const { return ei_random<Scalar>(); }
+ inline const Scalar operator() (int, int = 0) const { return ei_random<Scalar>(); }
};
template<typename Scalar>
struct ei_functor_traits<ei_scalar_random_op<Scalar> >
diff --git a/Eigen/src/Array/Replicate.h b/Eigen/src/Array/Replicate.h
index 3f87e09fe..cd23e0d6f 100644
--- a/Eigen/src/Array/Replicate.h
+++ b/Eigen/src/Array/Replicate.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -55,7 +55,7 @@ struct ei_traits<Replicate<MatrixType,RowFactor,ColFactor> >
: ColFactor * MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = RowsAtCompileTime,
MaxColsAtCompileTime = ColsAtCompileTime,
- Flags = _MatrixTypeNested::Flags & HereditaryBits,
+ Flags = (_MatrixTypeNested::Flags & HereditaryBits),
CoeffReadCost = _MatrixTypeNested::CoeffReadCost
};
};
@@ -66,7 +66,7 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
public:
typedef typename MatrixType::template MakeBase< Replicate<MatrixType,RowFactor,ColFactor> >::Type Base;
- _EIGEN_GENERIC_PUBLIC_INTERFACE(Replicate)
+ EIGEN_DENSE_PUBLIC_INTERFACE(Replicate)
template<typename OriginalMatrixType>
inline explicit Replicate(const OriginalMatrixType& matrix)
diff --git a/Eigen/src/Array/Reverse.h b/Eigen/src/Array/Reverse.h
index 7d2c34816..a405fbb4b 100644
--- a/Eigen/src/Array/Reverse.h
+++ b/Eigen/src/Array/Reverse.h
@@ -3,7 +3,7 @@
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2009 Ricard Marxer <email@ricardmarxer.com>
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -59,7 +59,7 @@ struct ei_traits<Reverse<MatrixType, Direction> >
LinearAccess = ( (Direction==BothDirections) && (int(_MatrixTypeNested::Flags)&PacketAccessBit) )
? LinearAccessBit : 0,
- Flags = (int(_MatrixTypeNested::Flags) & (HereditaryBits | PacketAccessBit | LinearAccess)),
+ Flags = int(_MatrixTypeNested::Flags) & (HereditaryBits | PacketAccessBit | LinearAccess),
CoeffReadCost = _MatrixTypeNested::CoeffReadCost
};
@@ -80,7 +80,7 @@ template<typename MatrixType, int Direction> class Reverse
public:
typedef typename MatrixType::template MakeBase< Reverse<MatrixType, Direction> >::Type Base;
- _EIGEN_GENERIC_PUBLIC_INTERFACE(Reverse)
+ EIGEN_DENSE_PUBLIC_INTERFACE(Reverse)
protected:
enum {
diff --git a/Eigen/src/Array/Select.h b/Eigen/src/Array/Select.h
index b1fab69c9..43735bd76 100644
--- a/Eigen/src/Array/Select.h
+++ b/Eigen/src/Array/Select.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -56,9 +56,9 @@ struct ei_traits<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime,
Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & HereditaryBits,
- CoeffReadCost = ei_traits<typename ei_cleantype<ConditionMatrixNested>::type>::CoeffReadCost
- + EIGEN_ENUM_MAX(ei_traits<typename ei_cleantype<ThenMatrixNested>::type>::CoeffReadCost,
- ei_traits<typename ei_cleantype<ElseMatrixNested>::type>::CoeffReadCost)
+ CoeffReadCost = ei_traits<typename ei_cleantype<ConditionMatrixNested>::type>::CoeffReadCost
+ + EIGEN_ENUM_MAX(ei_traits<typename ei_cleantype<ThenMatrixNested>::type>::CoeffReadCost,
+ ei_traits<typename ei_cleantype<ElseMatrixNested>::type>::CoeffReadCost)
};
};
@@ -69,7 +69,7 @@ class Select : ei_no_assignment_operator,
public:
typedef typename ThenMatrixType::template MakeBase< Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >::Type Base;
- _EIGEN_GENERIC_PUBLIC_INTERFACE(Select)
+ EIGEN_DENSE_PUBLIC_INTERFACE(Select)
Select(const ConditionMatrixType& conditionMatrix,
const ThenMatrixType& thenMatrix,
diff --git a/Eigen/src/Array/VectorwiseOp.h b/Eigen/src/Array/VectorwiseOp.h
index eef554d8a..697a07d32 100644
--- a/Eigen/src/Array/VectorwiseOp.h
+++ b/Eigen/src/Array/VectorwiseOp.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
@@ -80,7 +80,7 @@ class PartialReduxExpr : ei_no_assignment_operator,
public:
typedef typename MatrixType::template MakeBase< PartialReduxExpr<MatrixType, MemberOp, Direction> >::Type Base;
- _EIGEN_GENERIC_PUBLIC_INTERFACE(PartialReduxExpr)
+ EIGEN_DENSE_PUBLIC_INTERFACE(PartialReduxExpr)
typedef typename ei_traits<PartialReduxExpr>::MatrixTypeNested MatrixTypeNested;
typedef typename ei_traits<PartialReduxExpr>::_MatrixTypeNested _MatrixTypeNested;
diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h
index 8b13d8e2e..b794b0c43 100644
--- a/Eigen/src/Cholesky/LDLT.h
+++ b/Eigen/src/Cholesky/LDLT.h
@@ -206,7 +206,7 @@ LDLT<MatrixType>& LDLT<MatrixType>::compute(const MatrixType& a)
// in "Analysis of the Cholesky Decomposition of a Semi-definite Matrix" by
// Nicholas J. Higham. Also see "Accuracy and Stability of Numerical
// Algorithms" page 217, also by Higham.
- cutoff = ei_abs(epsilon<Scalar>() * size * biggest_in_corner);
+ cutoff = ei_abs(NumTraits<Scalar>::epsilon() * RealScalar(size) * biggest_in_corner);
m_sign = ei_real(m_matrix.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 1 : -1;
}
diff --git a/Eigen/src/Core/AnyMatrixBase.h b/Eigen/src/Core/AnyMatrixBase.h
index cc2f4157c..a5d1cfe9f 100644
--- a/Eigen/src/Core/AnyMatrixBase.h
+++ b/Eigen/src/Core/AnyMatrixBase.h
@@ -100,9 +100,9 @@ template<typename Derived> struct AnyMatrixBase
/** \brief Copies the generic expression \a other into *this.
*
- * \details The expression must provide a (templated) evalTo(Derived& dst) const
- * function which does the actual job. In practice, this allows any user to write
- * its own special matrix without having to modify MatrixBase
+ * \details The expression must provide a (templated) evalTo(Derived& dst) const
+ * function which does the actual job. In practice, this allows any user to write
+ * its own special matrix without having to modify MatrixBase
*
* \returns a reference to *this.
*/
diff --git a/Eigen/src/Core/Assign.h b/Eigen/src/Core/Assign.h
index e5c17b3f4..9440cebf1 100644
--- a/Eigen/src/Core/Assign.h
+++ b/Eigen/src/Core/Assign.h
@@ -378,6 +378,31 @@ struct ei_assign_impl<Derived1, Derived2, InnerVectorizedTraversal, InnerUnrolli
*** Linear vectorization ***
***************************/
+template <bool IsAligned = false>
+struct ei_unaligned_assign_impl
+{
+ template <typename Derived, typename OtherDerived>
+ static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, int, int) {}
+};
+
+template <>
+struct ei_unaligned_assign_impl<false>
+{
+ // MSVC must not inline this function. If it does, it fails to optimize the
+ // packet access path.
+#ifdef _MSC_VER
+ template <typename Derived, typename OtherDerived>
+ static EIGEN_DONT_INLINE void run(const Derived& src, OtherDerived& dst, int start, int end)
+#else
+ template <typename Derived, typename OtherDerived>
+ static EIGEN_STRONG_INLINE void run(const Derived& src, OtherDerived& dst, int start, int end)
+#endif
+ {
+ for (int index = start; index < end; ++index)
+ dst.copyCoeff(index, src);
+ }
+};
+
template<typename Derived1, typename Derived2>
struct ei_assign_impl<Derived1, Derived2, LinearVectorizedTraversal, NoUnrolling>
{
@@ -389,16 +414,14 @@ struct ei_assign_impl<Derived1, Derived2, LinearVectorizedTraversal, NoUnrolling
: ei_first_aligned(&dst.coeffRef(0), size);
const int alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;
- for(int index = 0; index < alignedStart; ++index)
- dst.copyCoeff(index, src);
+ ei_unaligned_assign_impl<ei_assign_traits<Derived1,Derived2>::DstIsAligned!=0>::run(src,dst,0,alignedStart);
for(int index = alignedStart; index < alignedEnd; index += packetSize)
{
dst.template copyPacket<Derived2, Aligned, ei_assign_traits<Derived1,Derived2>::SrcAlignment>(index, src);
}
- for(int index = alignedEnd; index < size; ++index)
- dst.copyCoeff(index, src);
+ ei_unaligned_assign_impl<>::run(src,dst,alignedEnd,size);
}
};
diff --git a/Eigen/src/Core/BandMatrix.h b/Eigen/src/Core/BandMatrix.h
index 67684eca3..538e6dd76 100644
--- a/Eigen/src/Core/BandMatrix.h
+++ b/Eigen/src/Core/BandMatrix.h
@@ -77,7 +77,7 @@ class BandMatrix : public AnyMatrixBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs
DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic))
? 1 + Supers + Subs
: Dynamic,
- SizeAtCompileTime = EIGEN_ENUM_MIN(Rows,Cols)
+ SizeAtCompileTime = EIGEN_SIZE_MIN(Rows,Cols)
};
typedef Matrix<Scalar,DataRowsAtCompileTime,ColsAtCompileTime,Options&RowMajor?RowMajor:ColMajor> DataType;
@@ -136,6 +136,7 @@ class BandMatrix : public AnyMatrixBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs
DiagonalSize = (RowsAtCompileTime==Dynamic || ColsAtCompileTime==Dynamic)
? Dynamic
: (ActualIndex<0
+ // we handled Dynamic already, so can use EIGEN_ENUM_MIN safely here.
? EIGEN_ENUM_MIN(ColsAtCompileTime, RowsAtCompileTime + ActualIndex)
: EIGEN_ENUM_MIN(RowsAtCompileTime, ColsAtCompileTime - ActualIndex))
};
diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h
index fa63d27dc..3b4234c22 100644
--- a/Eigen/src/Core/Block.h
+++ b/Eigen/src/Core/Block.h
@@ -86,7 +86,7 @@ template<typename MatrixType, int BlockRows, int BlockCols, int _DirectAccessSta
public:
typedef typename MatrixType::template MakeBase< Block<MatrixType, BlockRows, BlockCols, _DirectAccessStatus> >::Type Base;
- _EIGEN_DENSE_PUBLIC_INTERFACE(Block)
+ EIGEN_DENSE_PUBLIC_INTERFACE(Block)
class InnerIterator;
@@ -218,7 +218,7 @@ class Block<MatrixType,BlockRows,BlockCols,HasDirectAccess>
public:
typedef MapBase<Block, typename MatrixType::template MakeBase<Block>::Type> Base;
- _EIGEN_GENERIC_PUBLIC_INTERFACE(Block)
+ EIGEN_DENSE_PUBLIC_INTERFACE(Block)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
diff --git a/Eigen/src/Core/CwiseBinaryOp.h b/Eigen/src/Core/CwiseBinaryOp.h
index 990c553dd..9ed005dce 100644
--- a/Eigen/src/Core/CwiseBinaryOp.h
+++ b/Eigen/src/Core/CwiseBinaryOp.h
@@ -124,8 +124,11 @@ class CwiseBinaryOp : ei_no_assignment_operator,
EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); }
EIGEN_STRONG_INLINE int cols() const { return m_lhs.cols(); }
+ /** \returns the left hand side nested expression */
const _LhsNested& lhs() const { return m_lhs; }
+ /** \returns the right hand side nested expression */
const _RhsNested& rhs() const { return m_rhs; }
+ /** \returns the functor representing the binary operation */
const BinaryOp& functor() const { return m_functor; }
protected:
@@ -138,11 +141,11 @@ template<typename BinaryOp, typename Lhs, typename Rhs>
class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Dense>
: public Lhs::template MakeBase< CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::Type
{
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
public:
- typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
typedef typename Lhs::template MakeBase< CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::Type Base;
- _EIGEN_DENSE_PUBLIC_INTERFACE( Derived )
+ EIGEN_DENSE_PUBLIC_INTERFACE( Derived )
EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const
{
diff --git a/Eigen/src/Core/CwiseNullaryOp.h b/Eigen/src/Core/CwiseNullaryOp.h
index 129d558b4..5800335d7 100644
--- a/Eigen/src/Core/CwiseNullaryOp.h
+++ b/Eigen/src/Core/CwiseNullaryOp.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -60,7 +60,7 @@ class CwiseNullaryOp : ei_no_assignment_operator,
public:
typedef typename MatrixType::template MakeBase< CwiseNullaryOp<NullaryOp, MatrixType> >::Type Base;
- _EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)
+ EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)
CwiseNullaryOp(int rows, int cols, const NullaryOp& func = NullaryOp())
: m_rows(rows), m_cols(cols), m_functor(func)
@@ -80,23 +80,20 @@ class CwiseNullaryOp : ei_no_assignment_operator,
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int, int) const
+ EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const
{
- return m_functor.packetOp();
+ return m_functor.packetOp(row, col);
}
EIGEN_STRONG_INLINE const Scalar coeff(int index) const
{
- if(RowsAtCompileTime == 1)
- return m_functor(0, index);
- else
- return m_functor(index, 0);
+ return m_functor(index);
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int) const
+ EIGEN_STRONG_INLINE PacketScalar packet(int index) const
{
- return m_functor.packetOp();
+ return m_functor.packetOp(index);
}
protected:
@@ -228,6 +225,49 @@ DenseBase<Derived>::Constant(const Scalar& value)
return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, ei_scalar_constant_op<Scalar>(value));
}
+/**
+ * \brief Sets a linearly spaced vector.
+ *
+ * The function generates 'size' equally spaced values in the closed interval [low,high].
+ * This particular version of LinSpaced() uses sequential access, i.e. vector access is
+ * assumed to be a(0), a(1), ..., a(size-1). This assumption allows for better vectorization
+ * and yields faster code than the random access version.
+ *
+ * \only_for_vectors
+ *
+ * Example: \include DenseBase_LinSpaced_seq.cpp
+ * Output: \verbinclude DenseBase_LinSpaced_seq.out
+ *
+ * \sa setLinSpaced(const Scalar&,const Scalar&,int), LinSpaced(Scalar,Scalar,int), CwiseNullaryOp
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, int size)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return NullaryExpr(size, ei_linspaced_op<Scalar,false>(low,high,size));
+}
+
+/**
+ * \brief Sets a linearly spaced vector.
+ *
+ * The function generates 'size' equally spaced values in the closed interval [low,high].
+ *
+ * \only_for_vectors
+ *
+ * Example: \include DenseBase_LinSpaced.cpp
+ * Output: \verbinclude DenseBase_LinSpaced.out
+ *
+ * \sa setLinSpaced(const Scalar&,const Scalar&,int), LinSpaced(Sequential_t,const Scalar&,const Scalar&,int), CwiseNullaryOp
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high, int size)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return NullaryExpr(size, ei_linspaced_op<Scalar,true>(low,high,size));
+}
+
/** \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
template<typename Derived>
bool DenseBase<Derived>::isApproxToConstant
@@ -305,6 +345,24 @@ DenseStorageBase<Derived,_Base,_Options>::setConstant(int rows, int cols, const
return setConstant(value);
}
+/**
+ * \brief Sets a linearly spaced vector.
+ *
+ * The function generates 'size' equally spaced values in the closed interval [low,high].
+ *
+ * \only_for_vectors
+ *
+ * Example: \include DenseBase_setLinSpaced.cpp
+ * Output: \verbinclude DenseBase_setLinSpaced.out
+ *
+ * \sa CwiseNullaryOp
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high, int size)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return derived() = Derived::NullaryExpr(size, ei_linspaced_op<Scalar,false>(low,high,size));
+}
// zero:
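
A usage sketch of the new LinSpaced API (not part of the patch), using the (low, high, size) argument order introduced here; the Sequential_t-tagged overload shown above selects the sequential-access variant instead:

#include <Eigen/Core>
#include <iostream>

int main()
{
  // Five equally spaced values in the closed interval [0,1]: 0 0.25 0.5 0.75 1
  Eigen::VectorXf v = Eigen::VectorXf::LinSpaced(0.0f, 1.0f, 5);

  // Same values written into an existing vector.
  Eigen::VectorXf w(5);
  w.setLinSpaced(0.0f, 1.0f, 5);

  std::cout << v.transpose() << std::endl << w.transpose() << std::endl;
  return 0;
}
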
diff --git a/Eigen/src/Core/CwiseUnaryOp.h b/Eigen/src/Core/CwiseUnaryOp.h
index 1abf0fffb..9a6bc4bd5 100644
--- a/Eigen/src/Core/CwiseUnaryOp.h
+++ b/Eigen/src/Core/CwiseUnaryOp.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
@@ -49,9 +49,9 @@ struct ei_traits<CwiseUnaryOp<UnaryOp, MatrixType> >
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
enum {
- Flags = (_MatrixTypeNested::Flags & (
+ Flags = _MatrixTypeNested::Flags & (
HereditaryBits | LinearAccessBit | AlignedBit
- | (ei_functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0))),
+ | (ei_functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
CoeffReadCost = _MatrixTypeNested::CoeffReadCost + ei_functor_traits<UnaryOp>::Cost
};
};
@@ -74,16 +74,14 @@ class CwiseUnaryOp : ei_no_assignment_operator,
EIGEN_STRONG_INLINE int rows() const { return m_matrix.rows(); }
EIGEN_STRONG_INLINE int cols() const { return m_matrix.cols(); }
- /** \internal used for introspection */
- const UnaryOp& _functor() const { return m_functor; }
-
- /** \internal used for introspection */
- const typename ei_cleantype<typename MatrixType::Nested>::type&
- _expression() const { return m_matrix; }
+ /** \returns the functor representing the unary operation */
+ const UnaryOp& functor() const { return m_functor; }
+ /** \returns the nested expression */
const typename ei_cleantype<typename MatrixType::Nested>::type&
nestedExpression() const { return m_matrix; }
+ /** \returns the nested expression */
typename ei_cleantype<typename MatrixType::Nested>::type&
nestedExpression() { return m_matrix.const_cast_derived(); }
@@ -98,37 +96,33 @@ template<typename UnaryOp, typename MatrixType>
class CwiseUnaryOpImpl<UnaryOp,MatrixType,Dense>
: public MatrixType::template MakeBase< CwiseUnaryOp<UnaryOp, MatrixType> >::Type
{
- const typename ei_cleantype<typename MatrixType::Nested>::type& nestedExpression() const
- { return derived().nestedExpression(); }
- typename ei_cleantype<typename MatrixType::Nested>::type& nestedExpression()
- { return derived().nestedExpression(); }
+ typedef CwiseUnaryOp<UnaryOp, MatrixType> Derived;
public:
- typedef CwiseUnaryOp<UnaryOp, MatrixType> Derived;
typedef typename MatrixType::template MakeBase< CwiseUnaryOp<UnaryOp, MatrixType> >::Type Base;
- _EIGEN_DENSE_PUBLIC_INTERFACE( Derived )
+ EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const
{
- return derived()._functor()(nestedExpression().coeff(row, col));
+ return derived().functor()(derived().nestedExpression().coeff(row, col));
}
template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const
{
- return derived()._functor().packetOp(nestedExpression().template packet<LoadMode>(row, col));
+ return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(row, col));
}
EIGEN_STRONG_INLINE const Scalar coeff(int index) const
{
- return derived()._functor()(nestedExpression().coeff(index));
+ return derived().functor()(derived().nestedExpression().coeff(index));
}
template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(int index) const
{
- return derived()._functor().packetOp(nestedExpression().template packet<LoadMode>(index));
+ return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(index));
}
};
diff --git a/Eigen/src/Core/CwiseUnaryView.h b/Eigen/src/Core/CwiseUnaryView.h
index 2ff5d3d45..2198ed226 100644
--- a/Eigen/src/Core/CwiseUnaryView.h
+++ b/Eigen/src/Core/CwiseUnaryView.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -72,12 +72,14 @@ class CwiseUnaryView : ei_no_assignment_operator,
EIGEN_STRONG_INLINE int rows() const { return m_matrix.rows(); }
EIGEN_STRONG_INLINE int cols() const { return m_matrix.cols(); }
- /** \internal used for introspection */
- const ViewOp& _functor() const { return m_functor; }
+ /** \returns the functor representing the unary operation */
+ const ViewOp& functor() const { return m_functor; }
+ /** \returns the nested expression */
const typename ei_cleantype<typename MatrixType::Nested>::type&
nestedExpression() const { return m_matrix; }
+ /** \returns the nested expression */
typename ei_cleantype<typename MatrixType::Nested>::type&
nestedExpression() { return m_matrix.const_cast_derived(); }
@@ -88,36 +90,34 @@ class CwiseUnaryView : ei_no_assignment_operator,
};
template<typename ViewOp, typename MatrixType>
-class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense> : public MatrixBase<CwiseUnaryView<ViewOp, MatrixType> >
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense>
+ : public MatrixType::template MakeBase< CwiseUnaryView<ViewOp, MatrixType> >::Type
{
- const typename ei_cleantype<typename MatrixType::Nested>::type& nestedExpression() const
- { return derived().nestedExpression(); }
- typename ei_cleantype<typename MatrixType::Nested>::type& nestedExpression()
- { return derived().nestedExpression(); }
+ typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
public:
- typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
- EIGEN_DENSE_PUBLIC_INTERFACE( Derived )
+ typedef typename MatrixType::template MakeBase< CwiseUnaryView<ViewOp, MatrixType> >::Type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const
{
- return derived()._functor()(nestedExpression().coeff(row, col));
+ return derived().functor()(derived().nestedExpression().coeff(row, col));
}
EIGEN_STRONG_INLINE const Scalar coeff(int index) const
{
- return derived()._functor()(nestedExpression().coeff(index));
+ return derived().functor()(derived().nestedExpression().coeff(index));
}
EIGEN_STRONG_INLINE Scalar& coeffRef(int row, int col)
{
- return derived()._functor()(nestedExpression().const_cast_derived().coeffRef(row, col));
+ return derived().functor()(const_cast_derived().nestedExpression().coeffRef(row, col));
}
EIGEN_STRONG_INLINE Scalar& coeffRef(int index)
{
- return derived()._functor()(nestedExpression().const_cast_derived().coeffRef(index));
+ return derived().functor()(const_cast_derived().nestedExpression().coeffRef(index));
}
};
diff --git a/Eigen/src/Core/DenseBase.h b/Eigen/src/Core/DenseBase.h
index e07b02a51..21d792f49 100644
--- a/Eigen/src/Core/DenseBase.h
+++ b/Eigen/src/Core/DenseBase.h
@@ -2,7 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -192,11 +192,15 @@ template<typename Derived> class DenseBase
/** \internal Represents a matrix with all coefficients equal to one another*/
typedef CwiseNullaryOp<ei_scalar_constant_op<Scalar>,Derived> ConstantReturnType;
+ /** \internal Represents a vector with linearly spaced coefficients that allows sequential access only. */
+ typedef CwiseNullaryOp<ei_linspaced_op<Scalar,false>,Derived> SequentialLinSpacedReturnType;
+ /** \internal Represents a vector with linearly spaced coefficients that allows random access. */
+ typedef CwiseNullaryOp<ei_linspaced_op<Scalar,true>,Derived> RandomAccessLinSpacedReturnType;
/** \internal the return type of MatrixBase::eigenvalues() */
typedef Matrix<typename NumTraits<typename ei_traits<Derived>::Scalar>::Real, ei_traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType;
- /** \internal expression tyepe of a column */
+ /** \internal expression type of a column */
typedef Block<Derived, ei_traits<Derived>::RowsAtCompileTime, 1> ColXpr;
- /** \internal expression tyepe of a column */
+ /** \internal expression type of a column */
typedef Block<Derived, 1, ei_traits<Derived>::ColsAtCompileTime> RowXpr;
#endif // not EIGEN_PARSED_BY_DOXYGEN
@@ -229,6 +233,9 @@ template<typename Derived> class DenseBase
CommaInitializer<Derived> operator<< (const Scalar& s);
+ template<unsigned int Added,unsigned int Removed>
+ const Flagged<Derived, Added, Removed> flagged() const;
+
template<typename OtherDerived>
CommaInitializer<Derived> operator<< (const DenseBase<OtherDerived>& other);
@@ -343,6 +350,11 @@ template<typename Derived> class DenseBase
static const ConstantReturnType
Constant(const Scalar& value);
+ static const SequentialLinSpacedReturnType
+ LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, int size);
+ static const RandomAccessLinSpacedReturnType
+ LinSpaced(const Scalar& low, const Scalar& high, int size);
+
template<typename CustomNullaryOp>
static const CwiseNullaryOp<CustomNullaryOp, Derived>
NullaryExpr(int rows, int cols, const CustomNullaryOp& func);
@@ -362,24 +374,24 @@ template<typename Derived> class DenseBase
void fill(const Scalar& value);
Derived& setConstant(const Scalar& value);
+ Derived& setLinSpaced(const Scalar& low, const Scalar& high, int size);
Derived& setZero();
Derived& setOnes();
Derived& setRandom();
-
template<typename OtherDerived>
bool isApprox(const DenseBase<OtherDerived>& other,
- RealScalar prec = dummy_precision<Scalar>()) const;
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
bool isMuchSmallerThan(const RealScalar& other,
- RealScalar prec = dummy_precision<Scalar>()) const;
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
template<typename OtherDerived>
bool isMuchSmallerThan(const DenseBase<OtherDerived>& other,
- RealScalar prec = dummy_precision<Scalar>()) const;
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
- bool isApproxToConstant(const Scalar& value, RealScalar prec = dummy_precision<Scalar>()) const;
- bool isConstant(const Scalar& value, RealScalar prec = dummy_precision<Scalar>()) const;
- bool isZero(RealScalar prec = dummy_precision<Scalar>()) const;
- bool isOnes(RealScalar prec = dummy_precision<Scalar>()) const;
+ bool isApproxToConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isZero(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isOnes(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
inline Derived& operator*=(const Scalar& other);
inline Derived& operator/=(const Scalar& other);
@@ -474,6 +486,12 @@ template<typename Derived> class DenseBase
#include EIGEN_DENSEBASE_PLUGIN
#endif
+ // disable the use of evalTo for dense objects with a nice compilation error
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ EIGEN_STATIC_ASSERT((ei_is_same_type<Dest,void>::ret),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS);
+ }
+
protected:
/** Default constructor. Do nothing. */
DenseBase()
diff --git a/Eigen/src/Core/DenseStorageBase.h b/Eigen/src/Core/DenseStorageBase.h
index f3d6e8944..5c8e48768 100644
--- a/Eigen/src/Core/DenseStorageBase.h
+++ b/Eigen/src/Core/DenseStorageBase.h
@@ -62,7 +62,7 @@ class DenseStorageBase : public _Base<Derived>
typedef class Eigen::Map<Derived, Aligned> AlignedMapType;
protected:
- ei_matrix_storage<Scalar, MaxSizeAtCompileTime, RowsAtCompileTime, ColsAtCompileTime, Options> m_storage;
+ ei_matrix_storage<Scalar, Base::MaxSizeAtCompileTime, Base::RowsAtCompileTime, Base::ColsAtCompileTime, Options> m_storage;
public:
enum { NeedsToAlign = (!(Options&DontAlign))
@@ -371,8 +371,7 @@ class DenseStorageBase : public _Base<Derived>
: m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
{
_check_template_params();
- resize(other.rows(), other.cols());
- *this = other;
+ Base::operator=(other.derived());
}
/** \name Map
@@ -490,12 +489,8 @@ class DenseStorageBase : public _Base<Derived>
return ei_assign_selector<Derived,OtherDerived,false>::run(this->derived(), other.derived());
}
- static EIGEN_STRONG_INLINE void _check_template_params()
+ EIGEN_STRONG_INLINE void _check_template_params()
{
- #ifdef EIGEN_DEBUG_MATRIX_CTOR
- EIGEN_DEBUG_MATRIX_CTOR;
- #endif
-
EIGEN_STATIC_ASSERT(((RowsAtCompileTime >= MaxRowsAtCompileTime)
&& (ColsAtCompileTime >= MaxColsAtCompileTime)
&& (MaxRowsAtCompileTime >= 0)
diff --git a/Eigen/src/Core/Diagonal.h b/Eigen/src/Core/Diagonal.h
index 344dbca40..3720952cd 100644
--- a/Eigen/src/Core/Diagonal.h
+++ b/Eigen/src/Core/Diagonal.h
@@ -64,7 +64,7 @@ struct ei_traits<Diagonal<MatrixType,Index> >
};
template<typename MatrixType, int Index> class Diagonal
- : public MatrixBase<Diagonal<MatrixType, Index> >
+ : public MatrixType::template MakeBase< Diagonal<MatrixType,Index> >::Type
{
// some compilers may fail to optimize std::max etc in case of compile-time constants...
EIGEN_STRONG_INLINE int absIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
@@ -73,7 +73,8 @@ template<typename MatrixType, int Index> class Diagonal
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(Diagonal)
+ typedef typename MatrixType::template MakeBase<Diagonal>::Type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
inline Diagonal(const MatrixType& matrix, int index = Index) : m_matrix(matrix), m_index(index) {}
diff --git a/Eigen/src/Core/DiagonalMatrix.h b/Eigen/src/Core/DiagonalMatrix.h
index bd23b2e09..08c046611 100644
--- a/Eigen/src/Core/DiagonalMatrix.h
+++ b/Eigen/src/Core/DiagonalMatrix.h
@@ -68,10 +68,10 @@ class DiagonalBase : public AnyMatrixBase<Derived>
const DiagonalProduct<MatrixDerived, Derived, OnTheLeft>
operator*(const MatrixBase<MatrixDerived> &matrix) const;
- inline const DiagonalWrapper<NestByValue<CwiseUnaryOp<ei_scalar_inverse_op<Scalar>, DiagonalVectorType> > >
+ inline const DiagonalWrapper<CwiseUnaryOp<ei_scalar_inverse_op<Scalar>, DiagonalVectorType> >
inverse() const
{
- return diagonal().cwiseInverse().nestByValue();
+ return diagonal().cwiseInverse();
}
};
diff --git a/Eigen/src/Core/DiagonalProduct.h b/Eigen/src/Core/DiagonalProduct.h
index 87068d9ba..868b4419a 100644
--- a/Eigen/src/Core/DiagonalProduct.h
+++ b/Eigen/src/Core/DiagonalProduct.h
@@ -48,7 +48,8 @@ class DiagonalProduct : ei_no_assignment_operator,
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(DiagonalProduct)
+ typedef MatrixBase<DiagonalProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(DiagonalProduct)
inline DiagonalProduct(const MatrixType& matrix, const DiagonalType& diagonal)
: m_matrix(matrix), m_diagonal(diagonal)
diff --git a/Eigen/src/Core/ExpressionMaker.h b/Eigen/src/Core/ExpressionMaker.h
deleted file mode 100644
index 7e2b81d4a..000000000
--- a/Eigen/src/Core/ExpressionMaker.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_EXPRESSIONMAKER_H
-#define EIGEN_EXPRESSIONMAKER_H
-
-// computes the shape of a matrix from its traits flag
-template<typename XprType> struct ei_shape_of
-{
- enum { ret = ei_traits<XprType>::Flags&SparseBit ? IsSparse : IsDense };
-};
-
-
-// Since the Sparse module is completely separated from the Core module, there is
-// no way to write the type of a generic expression working for both dense and sparse
-// matrix. Unless we change the overall design, here is a workaround.
-// There is an example in unsuported/Eigen/src/AutoDiff/AutoDiffScalar.
-
-template<typename Func, typename XprType, int Shape = ei_shape_of<XprType>::ret>
-struct MakeCwiseUnaryOp
-{
- typedef CwiseUnaryOp<Func,XprType> Type;
-};
-
-template<typename Func, typename A, typename B, int Shape = ei_shape_of<A>::ret>
-struct MakeCwiseBinaryOp
-{
- typedef CwiseBinaryOp<Func,A,B> Type;
-};
-
-// TODO complete the list
-
-
-#endif // EIGEN_EXPRESSIONMAKER_H
diff --git a/Eigen/src/Eigen2Support/Flagged.h b/Eigen/src/Core/Flagged.h
index bed110b64..7f42a1e73 100644
--- a/Eigen/src/Eigen2Support/Flagged.h
+++ b/Eigen/src/Core/Flagged.h
@@ -25,9 +25,7 @@
#ifndef EIGEN_FLAGGED_H
#define EIGEN_FLAGGED_H
-/** \deprecated it is only used by lazy() which is deprecated
- *
- * \class Flagged
+/** \class Flagged
*
* \brief Expression with modified flags
*
@@ -52,7 +50,8 @@ template<typename ExpressionType, unsigned int Added, unsigned int Removed> clas
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(Flagged)
+ typedef MatrixBase<Flagged> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Flagged)
typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
typedef typename ExpressionType::InnerIterator InnerIterator;
@@ -119,58 +118,18 @@ template<typename ExpressionType, unsigned int Added, unsigned int Removed> clas
ExpressionTypeNested m_matrix;
};
-/** \deprecated it is only used by lazy() which is deprecated
- *
- * \returns an expression of *this with added flags
- *
- * Example: \include MatrixBase_marked.cpp
- * Output: \verbinclude MatrixBase_marked.out
- *
- * \sa class Flagged, extract(), part()
- */
-template<typename Derived>
-template<unsigned int Added>
-inline const Flagged<Derived, Added, 0>
-MatrixBase<Derived>::marked() const
-{
- return derived();
-}
-
-/** \deprecated use MatrixBase::noalias()
+/** \returns an expression of *this with added and removed flags
*
- * \returns an expression of *this with the EvalBeforeAssigningBit flag removed.
+ * This is mostly for internal use.
*
- * Example: \include MatrixBase_lazy.cpp
- * Output: \verbinclude MatrixBase_lazy.out
- *
- * \sa class Flagged, marked()
+ * \sa class Flagged
*/
template<typename Derived>
-inline const Flagged<Derived, 0, EvalBeforeAssigningBit>
-MatrixBase<Derived>::lazy() const
+template<unsigned int Added,unsigned int Removed>
+inline const Flagged<Derived, Added, Removed>
+DenseBase<Derived>::flagged() const
{
return derived();
}
-
-/** \internal
- * Overloaded to perform an efficient C += (A*B).lazy() */
-template<typename Derived>
-template<typename ProductDerived, typename Lhs, typename Rhs>
-Derived& MatrixBase<Derived>::operator+=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
- EvalBeforeAssigningBit>& other)
-{
- other._expression().derived().addTo(derived()); return derived();
-}
-
-/** \internal
- * Overloaded to perform an efficient C -= (A*B).lazy() */
-template<typename Derived>
-template<typename ProductDerived, typename Lhs, typename Rhs>
-Derived& MatrixBase<Derived>::operator-=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
- EvalBeforeAssigningBit>& other)
-{
- other._expression().derived().subTo(derived()); return derived();
-}
-
#endif // EIGEN_FLAGGED_H
diff --git a/Eigen/src/Core/ForceAlignedAccess.h b/Eigen/src/Core/ForceAlignedAccess.h
index b3fbb3c5c..927f43413 100644
--- a/Eigen/src/Core/ForceAlignedAccess.h
+++ b/Eigen/src/Core/ForceAlignedAccess.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -41,11 +41,12 @@ struct ei_traits<ForceAlignedAccess<ExpressionType> > : public ei_traits<Express
{};
template<typename ExpressionType> class ForceAlignedAccess
- : public MatrixBase<ForceAlignedAccess<ExpressionType> >
+ : public ExpressionType::template MakeBase< ForceAlignedAccess<ExpressionType> >::Type
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(ForceAlignedAccess)
+ typedef typename ExpressionType::template MakeBase<ForceAlignedAccess<ExpressionType> >::Type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(ForceAlignedAccess)
inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {}
diff --git a/Eigen/src/Core/Functors.h b/Eigen/src/Core/Functors.h
index aa3eba5cc..31d0cff70 100644
--- a/Eigen/src/Core/Functors.h
+++ b/Eigen/src/Core/Functors.h
@@ -437,7 +437,7 @@ struct ei_scalar_constant_op {
EIGEN_STRONG_INLINE ei_scalar_constant_op(const ei_scalar_constant_op& other) : m_other(other.m_other) { }
EIGEN_STRONG_INLINE ei_scalar_constant_op(const Scalar& other) : m_other(other) { }
EIGEN_STRONG_INLINE const Scalar operator() (int, int = 0) const { return m_other; }
- EIGEN_STRONG_INLINE const PacketScalar packetOp() const { return ei_pset1(m_other); }
+ EIGEN_STRONG_INLINE const PacketScalar packetOp(int, int = 0) const { return ei_pset1(m_other); }
const Scalar m_other;
};
template<typename Scalar>
@@ -452,6 +452,75 @@ template<typename Scalar>
struct ei_functor_traits<ei_scalar_identity_op<Scalar> >
{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false, IsRepeatable = true }; };
+template <typename Scalar, bool RandomAccess> struct ei_linspaced_op_impl;
+
+// linear access for packet ops:
+// 1) initialization
+// base = [low, ..., low] + ([step, ..., step] * [-size, ..., 0])
+// 2) each step
+// base += [size*step, ..., size*step]
+template <typename Scalar>
+struct ei_linspaced_op_impl<Scalar,false>
+{
+ typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+
+ ei_linspaced_op_impl(Scalar low, Scalar step) :
+ m_low(low), m_step(step),
+ m_packetStep(ei_pset1(ei_packet_traits<Scalar>::size*step)),
+ m_base(ei_padd(ei_pset1(low),ei_pmul(ei_pset1(step),ei_plset<Scalar>(-ei_packet_traits<Scalar>::size)))) {}
+
+ EIGEN_STRONG_INLINE const Scalar operator() (int i) const { return m_low+i*m_step; }
+ EIGEN_STRONG_INLINE const PacketScalar packetOp(int) const { return m_base = ei_padd(m_base,m_packetStep); }
+
+ const Scalar m_low;
+ const Scalar m_step;
+ const PacketScalar m_packetStep;
+ mutable PacketScalar m_base;
+};
+
+// random access for packet ops:
+// 1) each step
+// [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size-1] ) )
+template <typename Scalar>
+struct ei_linspaced_op_impl<Scalar,true>
+{
+ typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+
+ ei_linspaced_op_impl(Scalar low, Scalar step) :
+ m_low(low), m_step(step),
+ m_lowPacket(ei_pset1(m_low)), m_stepPacket(ei_pset1(m_step)), m_interPacket(ei_plset<Scalar>(0)) {}
+
+ EIGEN_STRONG_INLINE const Scalar operator() (int i) const { return m_low+i*m_step; }
+ EIGEN_STRONG_INLINE const PacketScalar packetOp(int i) const
+ { return ei_padd(m_lowPacket, ei_pmul(m_stepPacket, ei_padd(ei_pset1<Scalar>(i),m_interPacket))); }
+
+ const Scalar m_low;
+ const Scalar m_step;
+ const PacketScalar m_lowPacket;
+ const PacketScalar m_stepPacket;
+ const PacketScalar m_interPacket;
+};
+
+// ----- Linspace functor ----------------------------------------------------------------
+
+// Forward declaration (we default to random access, which does not really give
+// us a speed gain when using packet access, but it allows the functor to be used
+// in nested expressions).
+template <typename Scalar, bool RandomAccess = true> struct ei_linspaced_op;
+template <typename Scalar, bool RandomAccess> struct ei_functor_traits< ei_linspaced_op<Scalar,RandomAccess> >
+{ enum { Cost = 1, PacketAccess = ei_packet_traits<Scalar>::size>1, IsRepeatable = true }; };
+template <typename Scalar, bool RandomAccess> struct ei_linspaced_op
+{
+ typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+ ei_linspaced_op(Scalar low, Scalar high, int num_steps) : impl(low, (high-low)/(num_steps-1)) {}
+ EIGEN_STRONG_INLINE const Scalar operator() (int i, int = 0) const { return impl(i); }
+ EIGEN_STRONG_INLINE const PacketScalar packetOp(int i, int = 0) const { return impl.packetOp(i); }
+ // This proxy object handles the actual required temporaries, the different
+ // implementations (random vs. sequential access), as well as the correct
+ // piping to size 2/4 packet operations.
+ const ei_linspaced_op_impl<Scalar,RandomAccess> impl;
+};
+
// allow to add new functors and specializations of ei_functor_traits from outside Eigen.
// this macro is really needed because ei_functor_traits must be specialized after it is declared but before it is used...
#ifdef EIGEN_FUNCTORS_PLUGIN
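For reference, the scalar path of the new linspaced functor reduces to value(i) = low + i*step with step = (high-low)/(num_steps-1); the packet implementations above are just two ways of vectorizing that same arithmetic. A minimal standalone sketch of the scalar logic (not the Eigen code itself, only the formula it implements):

#include <cassert>
#include <cstdio>
#include <vector>

// Standalone sketch of the scalar logic of ei_linspaced_op:
// value(i) = low + i * step, with step = (high - low) / (num_steps - 1).
std::vector<double> linspaced(double low, double high, int num_steps)
{
  assert(num_steps >= 2);                    // the functor divides by num_steps-1
  const double step = (high - low) / (num_steps - 1);
  std::vector<double> v(num_steps);
  for (int i = 0; i < num_steps; ++i)
    v[i] = low + i * step;                   // same formula as operator()(int i)
  return v;
}

int main()
{
  std::vector<double> v = linspaced(0.0, 1.0, 5); // 0, 0.25, 0.5, 0.75, 1
  for (std::size_t i = 0; i < v.size(); ++i)
    std::printf("%g ", v[i]);
  std::printf("\n");
  return 0;
}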
diff --git a/Eigen/src/Core/GenericPacketMath.h b/Eigen/src/Core/GenericPacketMath.h
index ae1720eca..08981f89d 100644
--- a/Eigen/src/Core/GenericPacketMath.h
+++ b/Eigen/src/Core/GenericPacketMath.h
@@ -157,6 +157,10 @@ ei_ploadu(const Scalar* from) { return *from; }
template<typename Scalar> inline typename ei_packet_traits<Scalar>::type
ei_pset1(const Scalar& a) { return a; }
+/** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */
+template<typename Scalar> inline typename ei_packet_traits<Scalar>::type
+ei_plset(const Scalar& a) { return a; }
+
/** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */
template<typename Scalar, typename Packet> inline void ei_pstore(Scalar* to, const Packet& from)
{ (*to) = from; }
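The generic ei_plset added above is only the scalar fallback (a "packet" of size 1); on a real packet type the contract is to produce a, a+1, ..., a+size-1, which is what the SSE specializations later in this patch build with _mm_set_ps(3,2,1,0) and friends. A scalar sketch of that contract, with a plain array standing in for the packet (the helper name is hypothetical):

#include <cstdio>

// Sketch of the ei_plset contract for a packet of PacketSize coefficients:
// out[k] = a + k, for k = 0 .. PacketSize-1.
template <int PacketSize>
void plset_sketch(float a, float out[PacketSize])
{
  for (int k = 0; k < PacketSize; ++k)
    out[k] = a + float(k);
}

int main()
{
  float p[4];
  plset_sketch<4>(10.f, p);   // 10, 11, 12, 13
  std::printf("%g %g %g %g\n", p[0], p[1], p[2], p[3]);
  return 0;
}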
diff --git a/Eigen/src/Core/IO.h b/Eigen/src/Core/IO.h
index d132064a6..3e8d2bc66 100644
--- a/Eigen/src/Core/IO.h
+++ b/Eigen/src/Core/IO.h
@@ -143,9 +143,16 @@ std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOForm
}
else if(fmt.precision == FullPrecision)
{
- explicit_precision = NumTraits<Scalar>::HasFloatingPoint
- ? std::ceil(-ei_log(epsilon<Scalar>())/ei_log(10.0))
- : 0;
+ if (NumTraits<Scalar>::HasFloatingPoint)
+ {
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ RealScalar explicit_precision_fp = std::ceil(-ei_log(NumTraits<Scalar>::epsilon())/ei_log(10.0));
+ explicit_precision = static_cast<std::streamsize>(explicit_precision_fp);
+ }
+ else
+ {
+ explicit_precision = 0;
+ }
}
else
{
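The FullPrecision branch above derives the number of significant decimal digits from the machine epsilon as ceil(-log(eps)/log(10)). A quick standalone check of that formula, using std::numeric_limits rather than Eigen's NumTraits:

#include <cmath>
#include <cstdio>
#include <limits>

int main()
{
  // ceil(-log(eps)/log(10)) ~ number of significant decimal digits of the type
  const double feps = std::numeric_limits<float>::epsilon();
  const double deps = std::numeric_limits<double>::epsilon();
  std::printf("float : %d digits\n", int(std::ceil(-std::log(feps) / std::log(10.0)))); // 7
  std::printf("double: %d digits\n", int(std::ceil(-std::log(deps) / std::log(10.0)))); // 16
  return 0;
}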
diff --git a/Eigen/src/Core/Map.h b/Eigen/src/Core/Map.h
index f3939c09f..83688dbca 100644
--- a/Eigen/src/Core/Map.h
+++ b/Eigen/src/Core/Map.h
@@ -64,7 +64,7 @@ template<typename MatrixType, int Options> class Map
public:
typedef MapBase<Map,typename MatrixType::template MakeBase<Map>::Type> Base;
- _EIGEN_GENERIC_PUBLIC_INTERFACE(Map)
+ EIGEN_DENSE_PUBLIC_INTERFACE(Map)
inline int stride() const { return this->innerSize(); }
diff --git a/Eigen/src/Core/MapBase.h b/Eigen/src/Core/MapBase.h
index 4d5ec1eeb..a922d8bb0 100644
--- a/Eigen/src/Core/MapBase.h
+++ b/Eigen/src/Core/MapBase.h
@@ -88,7 +88,7 @@ template<typename Derived, typename Base> class MapBase
inline const Scalar& coeff(int index) const
{
ei_assert(Derived::IsVectorAtCompileTime || (ei_traits<Derived>::Flags & LinearAccessBit));
- if ( ((RowsAtCompileTime == 1) == IsRowMajor) )
+ if ( ((RowsAtCompileTime == 1) == IsRowMajor) || !int(Derived::IsVectorAtCompileTime) )
return m_data[index];
else
return m_data[index*stride()];
@@ -97,7 +97,7 @@ template<typename Derived, typename Base> class MapBase
inline Scalar& coeffRef(int index)
{
ei_assert(Derived::IsVectorAtCompileTime || (ei_traits<Derived>::Flags & LinearAccessBit));
- if ( ((RowsAtCompileTime == 1) == IsRowMajor) )
+ if ( ((RowsAtCompileTime == 1) == IsRowMajor) || !int(Derived::IsVectorAtCompileTime) )
return const_cast<Scalar*>(m_data)[index];
else
return const_cast<Scalar*>(m_data)[index*stride()];
@@ -170,7 +170,7 @@ template<typename Derived, typename Base> class MapBase
void checkDataAlignment() const
{
ei_assert( ((!(ei_traits<Derived>::Flags&AlignedBit))
- || ((std::size_t(m_data)&0xf)==0)) && "data is not aligned");
+ || ((size_t(m_data)&0xf)==0)) && "data is not aligned");
}
const Scalar* EIGEN_RESTRICT m_data;
diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h
index 1ea0116b4..c97a68e50 100644
--- a/Eigen/src/Core/MathFunctions.h
+++ b/Eigen/src/Core/MathFunctions.h
@@ -25,13 +25,6 @@
#ifndef EIGEN_MATHFUNCTIONS_H
#define EIGEN_MATHFUNCTIONS_H
-template<typename T> inline typename NumTraits<T>::Real epsilon()
-{
- return std::numeric_limits<typename NumTraits<T>::Real>::epsilon();
-}
-
-template<typename T> inline typename NumTraits<T>::Real dummy_precision();
-
template<typename T> inline T ei_random(T a, T b);
template<typename T> inline T ei_random();
template<typename T> inline T ei_random_amplitude()
@@ -55,12 +48,11 @@ template<typename T> inline typename NumTraits<T>::Real ei_hypot(T x, T y)
*** int ***
**************/
-template<> inline int dummy_precision<int>() { return 0; }
inline int ei_real(int x) { return x; }
inline int& ei_real_ref(int& x) { return x; }
inline int ei_imag(int) { return 0; }
inline int ei_conj(int x) { return x; }
-inline int ei_abs(int x) { return abs(x); }
+inline int ei_abs(int x) { return std::abs(x); }
inline int ei_abs2(int x) { return x*x; }
inline int ei_sqrt(int) { ei_assert(false); return 0; }
inline int ei_exp(int) { ei_assert(false); return 0; }
@@ -86,21 +78,21 @@ inline int ei_pow(int x, int y)
template<> inline int ei_random(int a, int b)
{
// We can't just do rand()%n as only the high-order bits are really random
- return a + static_cast<int>((b-a+1) * (rand() / (RAND_MAX + 1.0)));
+ return a + static_cast<int>((b-a+1) * (std::rand() / (RAND_MAX + 1.0)));
}
template<> inline int ei_random()
{
return ei_random<int>(-ei_random_amplitude<int>(), ei_random_amplitude<int>());
}
-inline bool ei_isMuchSmallerThan(int a, int, int = dummy_precision<int>())
+inline bool ei_isMuchSmallerThan(int a, int, int = NumTraits<int>::dummy_precision())
{
return a == 0;
}
-inline bool ei_isApprox(int a, int b, int = dummy_precision<int>())
+inline bool ei_isApprox(int a, int b, int = NumTraits<int>::dummy_precision())
{
return a == b;
}
-inline bool ei_isApproxOrLessThan(int a, int b, int = dummy_precision<int>())
+inline bool ei_isApproxOrLessThan(int a, int b, int = NumTraits<int>::dummy_precision())
{
return a <= b;
}
@@ -109,7 +101,6 @@ inline bool ei_isApproxOrLessThan(int a, int b, int = dummy_precision<int>())
*** float ***
**************/
-template<> inline float dummy_precision<float>() { return 1e-5f; }
inline float ei_real(float x) { return x; }
inline float& ei_real_ref(float& x) { return x; }
inline float ei_imag(float) { return 0.f; }
@@ -140,15 +131,15 @@ template<> inline float ei_random()
{
return ei_random<float>(-ei_random_amplitude<float>(), ei_random_amplitude<float>());
}
-inline bool ei_isMuchSmallerThan(float a, float b, float prec = dummy_precision<float>())
+inline bool ei_isMuchSmallerThan(float a, float b, float prec = NumTraits<float>::dummy_precision())
{
return ei_abs(a) <= ei_abs(b) * prec;
}
-inline bool ei_isApprox(float a, float b, float prec = dummy_precision<float>())
+inline bool ei_isApprox(float a, float b, float prec = NumTraits<float>::dummy_precision())
{
return ei_abs(a - b) <= std::min(ei_abs(a), ei_abs(b)) * prec;
}
-inline bool ei_isApproxOrLessThan(float a, float b, float prec = dummy_precision<float>())
+inline bool ei_isApproxOrLessThan(float a, float b, float prec = NumTraits<float>::dummy_precision())
{
return a <= b || ei_isApprox(a, b, prec);
}
@@ -157,8 +148,6 @@ inline bool ei_isApproxOrLessThan(float a, float b, float prec = dummy_precision
*** double ***
**************/
-template<> inline double dummy_precision<double>() { return 1e-12; }
-
inline double ei_real(double x) { return x; }
inline double& ei_real_ref(double& x) { return x; }
inline double ei_imag(double) { return 0.; }
@@ -189,15 +178,15 @@ template<> inline double ei_random()
{
return ei_random<double>(-ei_random_amplitude<double>(), ei_random_amplitude<double>());
}
-inline bool ei_isMuchSmallerThan(double a, double b, double prec = dummy_precision<double>())
+inline bool ei_isMuchSmallerThan(double a, double b, double prec = NumTraits<double>::dummy_precision())
{
return ei_abs(a) <= ei_abs(b) * prec;
}
-inline bool ei_isApprox(double a, double b, double prec = dummy_precision<double>())
+inline bool ei_isApprox(double a, double b, double prec = NumTraits<double>::dummy_precision())
{
return ei_abs(a - b) <= std::min(ei_abs(a), ei_abs(b)) * prec;
}
-inline bool ei_isApproxOrLessThan(double a, double b, double prec = dummy_precision<double>())
+inline bool ei_isApproxOrLessThan(double a, double b, double prec = NumTraits<double>::dummy_precision())
{
return a <= b || ei_isApprox(a, b, prec);
}
@@ -206,7 +195,6 @@ inline bool ei_isApproxOrLessThan(double a, double b, double prec = dummy_precis
*** complex<float> ***
*********************/
-template<> inline float dummy_precision<std::complex<float> >() { return dummy_precision<float>(); }
inline float ei_real(const std::complex<float>& x) { return std::real(x); }
inline float ei_imag(const std::complex<float>& x) { return std::imag(x); }
inline float& ei_real_ref(std::complex<float>& x) { return reinterpret_cast<float*>(&x)[0]; }
@@ -225,15 +213,15 @@ template<> inline std::complex<float> ei_random()
{
return std::complex<float>(ei_random<float>(), ei_random<float>());
}
-inline bool ei_isMuchSmallerThan(const std::complex<float>& a, const std::complex<float>& b, float prec = dummy_precision<float>())
+inline bool ei_isMuchSmallerThan(const std::complex<float>& a, const std::complex<float>& b, float prec = NumTraits<float>::dummy_precision())
{
return ei_abs2(a) <= ei_abs2(b) * prec * prec;
}
-inline bool ei_isMuchSmallerThan(const std::complex<float>& a, float b, float prec = dummy_precision<float>())
+inline bool ei_isMuchSmallerThan(const std::complex<float>& a, float b, float prec = NumTraits<float>::dummy_precision())
{
return ei_abs2(a) <= ei_abs2(b) * prec * prec;
}
-inline bool ei_isApprox(const std::complex<float>& a, const std::complex<float>& b, float prec = dummy_precision<float>())
+inline bool ei_isApprox(const std::complex<float>& a, const std::complex<float>& b, float prec = NumTraits<float>::dummy_precision())
{
return ei_isApprox(ei_real(a), ei_real(b), prec)
&& ei_isApprox(ei_imag(a), ei_imag(b), prec);
@@ -244,7 +232,6 @@ inline bool ei_isApprox(const std::complex<float>& a, const std::complex<float>&
*** complex<double> ***
**********************/
-template<> inline double dummy_precision<std::complex<double> >() { return dummy_precision<double>(); }
inline double ei_real(const std::complex<double>& x) { return std::real(x); }
inline double ei_imag(const std::complex<double>& x) { return std::imag(x); }
inline double& ei_real_ref(std::complex<double>& x) { return reinterpret_cast<double*>(&x)[0]; }
@@ -263,15 +250,15 @@ template<> inline std::complex<double> ei_random()
{
return std::complex<double>(ei_random<double>(), ei_random<double>());
}
-inline bool ei_isMuchSmallerThan(const std::complex<double>& a, const std::complex<double>& b, double prec = dummy_precision<double>())
+inline bool ei_isMuchSmallerThan(const std::complex<double>& a, const std::complex<double>& b, double prec = NumTraits<double>::dummy_precision())
{
return ei_abs2(a) <= ei_abs2(b) * prec * prec;
}
-inline bool ei_isMuchSmallerThan(const std::complex<double>& a, double b, double prec = dummy_precision<double>())
+inline bool ei_isMuchSmallerThan(const std::complex<double>& a, double b, double prec = NumTraits<double>::dummy_precision())
{
return ei_abs2(a) <= ei_abs2(b) * prec * prec;
}
-inline bool ei_isApprox(const std::complex<double>& a, const std::complex<double>& b, double prec = dummy_precision<double>())
+inline bool ei_isApprox(const std::complex<double>& a, const std::complex<double>& b, double prec = NumTraits<double>::dummy_precision())
{
return ei_isApprox(ei_real(a), ei_real(b), prec)
&& ei_isApprox(ei_imag(a), ei_imag(b), prec);
@@ -283,7 +270,6 @@ inline bool ei_isApprox(const std::complex<double>& a, const std::complex<double
*** long double ***
******************/
-template<> inline long double dummy_precision<long double>() { return dummy_precision<double>(); }
inline long double ei_real(long double x) { return x; }
inline long double& ei_real_ref(long double& x) { return x; }
inline long double ei_imag(long double) { return 0.; }
@@ -306,15 +292,15 @@ template<> inline long double ei_random()
{
return ei_random<double>(-ei_random_amplitude<double>(), ei_random_amplitude<double>());
}
-inline bool ei_isMuchSmallerThan(long double a, long double b, long double prec = dummy_precision<long double>())
+inline bool ei_isMuchSmallerThan(long double a, long double b, long double prec = NumTraits<long double>::dummy_precision())
{
return ei_abs(a) <= ei_abs(b) * prec;
}
-inline bool ei_isApprox(long double a, long double b, long double prec = dummy_precision<long double>())
+inline bool ei_isApprox(long double a, long double b, long double prec = NumTraits<long double>::dummy_precision())
{
return ei_abs(a - b) <= std::min(ei_abs(a), ei_abs(b)) * prec;
}
-inline bool ei_isApproxOrLessThan(long double a, long double b, long double prec = dummy_precision<long double>())
+inline bool ei_isApproxOrLessThan(long double a, long double b, long double prec = NumTraits<long double>::dummy_precision())
{
return a <= b || ei_isApprox(a, b, prec);
}
@@ -323,7 +309,6 @@ inline bool ei_isApproxOrLessThan(long double a, long double b, long double prec
*** bool ***
**************/
-template<> inline bool dummy_precision<bool>() { return 0; }
inline bool ei_real(bool x) { return x; }
inline bool& ei_real_ref(bool& x) { return x; }
inline bool ei_imag(bool) { return 0; }
@@ -336,15 +321,15 @@ template<> inline bool ei_random()
{
return (ei_random<int>(0,1) == 1);
}
-inline bool ei_isMuchSmallerThan(bool a, bool, bool = dummy_precision<bool>())
+inline bool ei_isMuchSmallerThan(bool a, bool, bool = NumTraits<bool>::dummy_precision())
{
return !a;
}
-inline bool ei_isApprox(bool a, bool b, bool = dummy_precision<bool>())
+inline bool ei_isApprox(bool a, bool b, bool = NumTraits<bool>::dummy_precision())
{
return a == b;
}
-inline bool ei_isApproxOrLessThan(bool a, bool b, bool = dummy_precision<bool>())
+inline bool ei_isApproxOrLessThan(bool a, bool b, bool = NumTraits<bool>::dummy_precision())
{
return int(a) <= int(b);
}
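The pattern behind all of these fuzzy comparisons is the same for every floating-point type: ei_isApprox compares |a-b| against min(|a|,|b|) scaled by the precision, and ei_isMuchSmallerThan compares |a| against |b| scaled by the precision. A standalone sketch of that logic, with a hard-coded constant standing in for NumTraits<double>::dummy_precision():

#include <algorithm>
#include <cmath>
#include <cstdio>

// Stand-in for NumTraits<double>::dummy_precision() (1e-12 in the patch above).
static const double prec = 1e-12;

bool is_approx(double a, double b)
{
  // relative comparison: |a-b| <= min(|a|,|b|) * prec
  return std::abs(a - b) <= std::min(std::abs(a), std::abs(b)) * prec;
}

bool is_much_smaller_than(double a, double b)
{
  // |a| is negligible with respect to |b|
  return std::abs(a) <= std::abs(b) * prec;
}

int main()
{
  std::printf("%d\n", is_approx(1.0, 1.0 + 1e-15));      // 1: within 1e-12 relative error
  std::printf("%d\n", is_much_smaller_than(1e-20, 1.0)); // 1: negligible w.r.t. 1
  std::printf("%d\n", is_approx(0.0, 1e-15));            // 0: the comparison is relative
  return 0;
}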
diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h
index f309f8abd..6f194ffba 100644
--- a/Eigen/src/Core/Matrix.h
+++ b/Eigen/src/Core/Matrix.h
@@ -134,10 +134,10 @@ class Matrix
* \sa DenseStorageBase
*/
typedef DenseStorageBase<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Eigen::MatrixBase, _Options> Base;
-
+
enum { Options = _Options };
- _EIGEN_GENERIC_PUBLIC_INTERFACE(Matrix)
+ EIGEN_DENSE_PUBLIC_INTERFACE(Matrix)
typedef typename Base::PlainMatrixType PlainMatrixType;
@@ -297,7 +297,7 @@ class Matrix
}
/** \brief Copy constructor for generic expressions.
- * \sa MatrixBase::operator=(const AnyMatrixBase<OtherDerived>&)
+ * \sa MatrixBase::operator=(const AnyMatrixBase<OtherDerived>&)
*/
template<typename OtherDerived>
EIGEN_STRONG_INLINE Matrix(const AnyMatrixBase<OtherDerived> &other)
@@ -311,7 +311,7 @@ class Matrix
}
/** \internal
- * \brief Override MatrixBase::swap() since for dynamic-sized matrices
+ * \brief Override MatrixBase::swap() since for dynamic-sized matrices
* of same type it is enough to swap the data pointers.
*/
template<typename OtherDerived>
diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h
index 3f1f2a6b9..229195046 100644
--- a/Eigen/src/Core/MatrixBase.h
+++ b/Eigen/src/Core/MatrixBase.h
@@ -119,8 +119,8 @@ template<typename Derived> class MatrixBase
/** \brief The plain matrix type corresponding to this expression.
*
- * This is not necessarily exactly the return type of eval(). In the case of plain matrices,
- * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed
+ * This is not necessarily exactly the return type of eval(). In the case of plain matrices,
+ * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed
* that the return type of eval() is either PlainMatrixType or const PlainMatrixType&.
*/
typedef Matrix<typename ei_traits<Derived>::Scalar,
@@ -189,6 +189,10 @@ template<typename Derived> class MatrixBase
operator*(const MatrixBase<OtherDerived> &other) const;
template<typename OtherDerived>
+ const typename ProductReturnType<Derived,OtherDerived,LazyCoeffBasedProductMode>::Type
+ lazyProduct(const MatrixBase<OtherDerived> &other) const;
+
+ template<typename OtherDerived>
Derived& operator*=(const AnyMatrixBase<OtherDerived>& other);
template<typename OtherDerived>
@@ -249,16 +253,16 @@ template<typename Derived> class MatrixBase
Derived& setIdentity();
Derived& setIdentity(int rows, int cols);
- bool isIdentity(RealScalar prec = dummy_precision<Scalar>()) const;
- bool isDiagonal(RealScalar prec = dummy_precision<Scalar>()) const;
+ bool isIdentity(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isDiagonal(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
- bool isUpperTriangular(RealScalar prec = dummy_precision<Scalar>()) const;
- bool isLowerTriangular(RealScalar prec = dummy_precision<Scalar>()) const;
+ bool isUpperTriangular(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isLowerTriangular(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
template<typename OtherDerived>
bool isOrthogonal(const MatrixBase<OtherDerived>& other,
- RealScalar prec = dummy_precision<Scalar>()) const;
- bool isUnitary(RealScalar prec = dummy_precision<Scalar>()) const;
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isUnitary(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
/** \returns true if each coefficients of \c *this and \a other are all exactly equal.
* \warning When using floating point scalar values you probably should rather use a
@@ -278,13 +282,11 @@ template<typename Derived> class MatrixBase
NoAlias<Derived,Eigen::MatrixBase > noalias();
- inline const NestByValue<Derived> nestByValue() const;
inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
inline ForceAlignedAccess<Derived> forceAlignedAccess();
template<bool Enable> inline const typename ei_meta_if<Enable,ForceAlignedAccess<Derived>,Derived&>::ret forceAlignedAccessIf() const;
template<bool Enable> inline typename ei_meta_if<Enable,ForceAlignedAccess<Derived>,Derived&>::ret forceAlignedAccessIf();
- Scalar mean() const;
Scalar trace() const;
/////////// Array module ///////////
@@ -308,13 +310,13 @@ template<typename Derived> class MatrixBase
ResultType& inverse,
typename ResultType::Scalar& determinant,
bool& invertible,
- const RealScalar& absDeterminantThreshold = dummy_precision<Scalar>()
+ const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
) const;
template<typename ResultType>
void computeInverseWithCheck(
ResultType& inverse,
bool& invertible,
- const RealScalar& absDeterminantThreshold = dummy_precision<Scalar>()
+ const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
) const;
Scalar determinant() const;
diff --git a/Eigen/src/Core/MatrixStorage.h b/Eigen/src/Core/MatrixStorage.h
index 584ba8ca3..046670452 100644
--- a/Eigen/src/Core/MatrixStorage.h
+++ b/Eigen/src/Core/MatrixStorage.h
@@ -26,6 +26,12 @@
#ifndef EIGEN_MATRIXSTORAGE_H
#define EIGEN_MATRIXSTORAGE_H
+#ifdef EIGEN_DEBUG_MATRIX_CTOR
+ #define EIGEN_INT_DEBUG_MATRIX_CTOR EIGEN_DEBUG_MATRIX_CTOR;
+#else
+ #define EIGEN_INT_DEBUG_MATRIX_CTOR
+#endif
+
struct ei_constructor_without_unaligned_array_assert {};
/** \internal
@@ -183,7 +189,8 @@ template<typename T, int _Options> class ei_matrix_storage<T, Dynamic, Dynamic,
inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
: m_data(0), m_rows(0), m_cols(0) {}
inline ei_matrix_storage(int size, int rows, int cols)
- : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_rows(rows), m_cols(cols) {}
+ : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_rows(rows), m_cols(cols)
+ { EIGEN_INT_DEBUG_MATRIX_CTOR }
inline ~ei_matrix_storage() { ei_conditional_aligned_delete<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
inline void swap(ei_matrix_storage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
@@ -198,6 +205,7 @@ template<typename T, int _Options> class ei_matrix_storage<T, Dynamic, Dynamic,
m_data = ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size);
else
m_data = 0;
+ EIGEN_INT_DEBUG_MATRIX_CTOR
}
m_rows = rows;
m_cols = cols;
@@ -214,7 +222,8 @@ template<typename T, int _Rows, int _Options> class ei_matrix_storage<T, Dynamic
public:
inline explicit ei_matrix_storage() : m_data(0), m_cols(0) {}
inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
- inline ei_matrix_storage(int size, int, int cols) : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_cols(cols) {}
+ inline ei_matrix_storage(int size, int, int cols) : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_cols(cols)
+ { EIGEN_INT_DEBUG_MATRIX_CTOR }
inline ~ei_matrix_storage() { ei_conditional_aligned_delete<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
inline static int rows(void) {return _Rows;}
@@ -228,6 +237,7 @@ template<typename T, int _Rows, int _Options> class ei_matrix_storage<T, Dynamic
m_data = ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size);
else
m_data = 0;
+ EIGEN_INT_DEBUG_MATRIX_CTOR
}
m_cols = cols;
}
@@ -243,7 +253,8 @@ template<typename T, int _Cols, int _Options> class ei_matrix_storage<T, Dynamic
public:
inline explicit ei_matrix_storage() : m_data(0), m_rows(0) {}
inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
- inline ei_matrix_storage(int size, int rows, int) : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_rows(rows) {}
+ inline ei_matrix_storage(int size, int rows, int) : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_rows(rows)
+ { EIGEN_INT_DEBUG_MATRIX_CTOR }
inline ~ei_matrix_storage() { ei_conditional_aligned_delete<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
inline int rows(void) const {return m_rows;}
@@ -257,6 +268,7 @@ template<typename T, int _Cols, int _Options> class ei_matrix_storage<T, Dynamic
m_data = ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size);
else
m_data = 0;
+ EIGEN_INT_DEBUG_MATRIX_CTOR
}
m_rows = rows;
}
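The new EIGEN_INT_DEBUG_MATRIX_CTOR wrapper lets a user-defined EIGEN_DEBUG_MATRIX_CTOR statement run inside the dynamic storage constructors and resize paths. A hypothetical usage sketch, assuming an Eigen checkout with this patch applied; the macro body below is an illustration, not part of the patch, and it omits the trailing semicolon because the wrapper appends one:

// Define the hook before including any Eigen header; MatrixStorage.h expands
// it (plus a ';') inside every dynamic allocation path.
#include <iostream>
#define EIGEN_DEBUG_MATRIX_CTOR \
  std::cout << "ei_matrix_storage: dynamic allocation" << std::endl
#include <Eigen/Core>

int main()
{
  Eigen::MatrixXd m(3, 3);   // expected to trigger the message once
  m.resize(5, 5);            // and again on reallocation
  return 0;
}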
diff --git a/Eigen/src/Core/Minor.h b/Eigen/src/Core/Minor.h
index 629dbe609..e7e164a16 100644
--- a/Eigen/src/Core/Minor.h
+++ b/Eigen/src/Core/Minor.h
@@ -64,7 +64,8 @@ template<typename MatrixType> class Minor
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(Minor)
+ typedef MatrixBase<Minor> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Minor)
inline Minor(const MatrixType& matrix,
int row, int col)
diff --git a/Eigen/src/Core/NestByValue.h b/Eigen/src/Core/NestByValue.h
index 85a672779..9f6d1c0c0 100644
--- a/Eigen/src/Core/NestByValue.h
+++ b/Eigen/src/Core/NestByValue.h
@@ -42,11 +42,12 @@ struct ei_traits<NestByValue<ExpressionType> > : public ei_traits<ExpressionType
{};
template<typename ExpressionType> class NestByValue
- : public MatrixBase<NestByValue<ExpressionType> >
+ : public ExpressionType::template MakeBase< NestByValue<ExpressionType> >::Type
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(NestByValue)
+ typedef typename ExpressionType::template MakeBase<NestByValue<ExpressionType> >::Type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(NestByValue)
inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {}
@@ -97,7 +98,7 @@ template<typename ExpressionType> class NestByValue
{
m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
}
-
+
operator const ExpressionType&() const { return m_expression; }
protected:
@@ -108,7 +109,7 @@ template<typename ExpressionType> class NestByValue
*/
template<typename Derived>
inline const NestByValue<Derived>
-MatrixBase<Derived>::nestByValue() const
+DenseBase<Derived>::nestByValue() const
{
return NestByValue<Derived>(derived());
}
diff --git a/Eigen/src/Core/NoAlias.h b/Eigen/src/Core/NoAlias.h
index bfea5c91d..30ddbeb3c 100644
--- a/Eigen/src/Core/NoAlias.h
+++ b/Eigen/src/Core/NoAlias.h
@@ -69,6 +69,14 @@ class NoAlias
template<typename ProductDerived, typename Lhs, typename Rhs>
EIGEN_STRONG_INLINE ExpressionType& operator-=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
{ other.derived().subTo(m_expression); return m_expression; }
+
+ template<typename Lhs, typename Rhs, int NestingFlags>
+ EIGEN_STRONG_INLINE ExpressionType& operator+=(const CoeffBasedProduct<Lhs,Rhs,NestingFlags>& other)
+ { return m_expression.derived() += CoeffBasedProduct<Lhs,Rhs,NestByRefBit>(other.lhs(), other.rhs()); }
+
+ template<typename Lhs, typename Rhs, int NestingFlags>
+ EIGEN_STRONG_INLINE ExpressionType& operator-=(const CoeffBasedProduct<Lhs,Rhs,NestingFlags>& other)
+ { return m_expression.derived() -= CoeffBasedProduct<Lhs,Rhs,NestByRefBit>(other.lhs(), other.rhs()); }
#endif
protected:
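These overloads intercept the case where a coefficient-based product is accumulated through noalias(), so the product is folded directly into the destination instead of forcing a temporary. A minimal usage sketch of the call they intercept, assuming an Eigen checkout with this patch applied:

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::Matrix3f A = Eigen::Matrix3f::Random();
  Eigen::Matrix3f B = Eigen::Matrix3f::Random();
  Eigen::Matrix3f C = Eigen::Matrix3f::Zero();

  // noalias() promises that C does not alias A or B, so the product can be
  // accumulated into C without an intermediate temporary.
  C.noalias() += A * B;
  C.noalias() -= A * B;   // brings C back to zero

  std::cout << C.norm() << std::endl;
  return 0;
}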
diff --git a/Eigen/src/Core/NumTraits.h b/Eigen/src/Core/NumTraits.h
index 304e2c1d6..37787b569 100644
--- a/Eigen/src/Core/NumTraits.h
+++ b/Eigen/src/Core/NumTraits.h
@@ -34,7 +34,7 @@
* \c std::complex<float>, \c std::complex<double>, and \c long \c double (especially
* useful to enforce x87 arithmetics when SSE is the default).
*
- * The provided data consists of:
+ * The provided data consists of everything that is supported by std::numeric_limits, plus:
* \li A typedef \a Real, giving the "real part" type of \a T. If \a T is already real,
* then \a Real is just a typedef to \a T. If \a T is \c std::complex<U> then \a Real
* is a typedef to \a U.
@@ -45,10 +45,29 @@
* type, and to 0 otherwise.
* \li An enum \a HasFloatingPoint. It is equal to \c 0 if \a T is \c int,
* and to \c 1 otherwise.
+ * \li An epsilon() function which, unlike std::numeric_limits::epsilon(), returns a \a Real instead of a \a T.
+ * \li A dummy_precision() function returning a weak epsilon value. It is mainly used by the fuzzy comparison operators.
+ * \li highest() and lowest() functions returning the highest and lowest possible values, respectively.
*/
template<typename T> struct NumTraits;
+template<typename T> struct ei_default_float_numtraits
+ : std::numeric_limits<T>
+{
+ inline static T highest() { return std::numeric_limits<T>::max(); }
+ inline static T lowest() { return -std::numeric_limits<T>::max(); }
+};
+
+template<typename T> struct ei_default_integral_numtraits
+ : std::numeric_limits<T>
+{
+ inline static T dummy_precision() { return T(0); }
+ inline static T highest() { return std::numeric_limits<T>::max(); }
+ inline static T lowest() { return std::numeric_limits<T>::min(); }
+};
+
template<> struct NumTraits<int>
+ : ei_default_integral_numtraits<int>
{
typedef int Real;
typedef double FloatingPoint;
@@ -63,6 +82,7 @@ template<> struct NumTraits<int>
};
template<> struct NumTraits<float>
+ : ei_default_float_numtraits<float>
{
typedef float Real;
typedef float FloatingPoint;
@@ -74,9 +94,12 @@ template<> struct NumTraits<float>
AddCost = 1,
MulCost = 1
};
+
+ inline static float dummy_precision() { return 1e-5f; }
};
template<> struct NumTraits<double>
+ : ei_default_float_numtraits<double>
{
typedef double Real;
typedef double FloatingPoint;
@@ -88,9 +111,12 @@ template<> struct NumTraits<double>
AddCost = 1,
MulCost = 1
};
+
+ inline static double dummy_precision() { return 1e-12; }
};
template<typename _Real> struct NumTraits<std::complex<_Real> >
+ : ei_default_float_numtraits<std::complex<_Real> >
{
typedef _Real Real;
typedef std::complex<_Real> FloatingPoint;
@@ -102,9 +128,13 @@ template<typename _Real> struct NumTraits<std::complex<_Real> >
AddCost = 2 * NumTraits<Real>::AddCost,
MulCost = 4 * NumTraits<Real>::MulCost + 2 * NumTraits<Real>::AddCost
};
+
+ inline static Real epsilon() { return std::numeric_limits<Real>::epsilon(); }
+ inline static Real dummy_precision() { return NumTraits<Real>::dummy_precision(); }
};
template<> struct NumTraits<long long int>
+ : ei_default_integral_numtraits<long long int>
{
typedef long long int Real;
typedef long double FloatingPoint;
@@ -119,6 +149,7 @@ template<> struct NumTraits<long long int>
};
template<> struct NumTraits<long double>
+ : ei_default_float_numtraits<long double>
{
typedef long double Real;
typedef long double FloatingPoint;
@@ -130,9 +161,12 @@ template<> struct NumTraits<long double>
AddCost = 1,
MulCost = 1
};
+
+ static inline long double dummy_precision() { return NumTraits<double>::dummy_precision(); }
};
template<> struct NumTraits<bool>
+ : ei_default_integral_numtraits<bool>
{
typedef bool Real;
typedef float FloatingPoint;
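The net effect of this NumTraits rework is that the per-type helpers now live on the traits class itself instead of the free epsilon<T>() and dummy_precision<T>() functions removed from MathFunctions.h. A small sketch of how the new members are meant to be queried, assuming an Eigen checkout with this patch applied:

#include <Eigen/Core>
#include <iostream>

int main()
{
  using Eigen::NumTraits;

  // epsilon() and dummy_precision() now come from NumTraits.
  std::cout << "double epsilon        : " << NumTraits<double>::epsilon() << "\n";
  std::cout << "double dummy_precision: " << NumTraits<double>::dummy_precision() << "\n";

  // highest()/lowest() wrap std::numeric_limits (max and -max for floating point).
  std::cout << "float highest: " << NumTraits<float>::highest() << "\n";
  std::cout << "float lowest : " << NumTraits<float>::lowest() << "\n";

  // For integral types dummy_precision() is exactly 0.
  std::cout << "int dummy_precision: " << NumTraits<int>::dummy_precision() << "\n";
  return 0;
}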
diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h
index 400ff06f3..af05773ee 100644
--- a/Eigen/src/Core/Product.h
+++ b/Eigen/src/Core/Product.h
@@ -54,8 +54,6 @@ enum {
Small = Dynamic/2
};
-enum { OuterProduct, InnerProduct, UnrolledProduct, GemvProduct, GemmProduct };
-
template<typename Lhs, typename Rhs> struct ei_product_type
{
typedef typename ei_cleantype<Lhs>::type _Lhs;
@@ -89,9 +87,12 @@ public:
template<int Rows, int Cols> struct ei_product_type_selector<Rows, Cols, 1> { enum { ret = OuterProduct }; };
template<int Depth> struct ei_product_type_selector<1, 1, Depth> { enum { ret = InnerProduct }; };
template<> struct ei_product_type_selector<1, 1, 1> { enum { ret = InnerProduct }; };
-template<> struct ei_product_type_selector<Small,1, Small> { enum { ret = UnrolledProduct }; };
-template<> struct ei_product_type_selector<1, Small,Small> { enum { ret = UnrolledProduct }; };
-template<> struct ei_product_type_selector<Small,Small,Small> { enum { ret = UnrolledProduct }; };
+template<> struct ei_product_type_selector<Small,1, Small> { enum { ret = CoeffBasedProductMode }; };
+template<> struct ei_product_type_selector<1, Small,Small> { enum { ret = CoeffBasedProductMode }; };
+template<> struct ei_product_type_selector<Small,Small,Small> { enum { ret = CoeffBasedProductMode }; };
+template<> struct ei_product_type_selector<Small, Small, 1> { enum { ret = LazyCoeffBasedProductMode }; };
+template<> struct ei_product_type_selector<Small, Large, 1> { enum { ret = LazyCoeffBasedProductMode }; };
+template<> struct ei_product_type_selector<Large, Small, 1> { enum { ret = LazyCoeffBasedProductMode }; };
template<> struct ei_product_type_selector<1, Large,Small> { enum { ret = GemvProduct }; };
template<> struct ei_product_type_selector<1, Large,Large> { enum { ret = GemvProduct }; };
template<> struct ei_product_type_selector<1, Small,Large> { enum { ret = GemvProduct }; };
@@ -133,11 +134,19 @@ struct ProductReturnType
};
template<typename Lhs, typename Rhs>
-struct ProductReturnType<Lhs,Rhs,UnrolledProduct>
+struct ProductReturnType<Lhs,Rhs,CoeffBasedProductMode>
+{
+ typedef typename ei_nested<Lhs, Rhs::ColsAtCompileTime, typename ei_plain_matrix_type<Lhs>::type >::type LhsNested;
+ typedef typename ei_nested<Rhs, Lhs::RowsAtCompileTime, typename ei_plain_matrix_type<Rhs>::type >::type RhsNested;
+ typedef CoeffBasedProduct<LhsNested, RhsNested, EvalBeforeAssigningBit | EvalBeforeNestingBit> Type;
+};
+
+template<typename Lhs, typename Rhs>
+struct ProductReturnType<Lhs,Rhs,LazyCoeffBasedProductMode>
{
typedef typename ei_nested<Lhs, Rhs::ColsAtCompileTime, typename ei_plain_matrix_type<Lhs>::type >::type LhsNested;
typedef typename ei_nested<Rhs, Lhs::RowsAtCompileTime, typename ei_plain_matrix_type<Rhs>::type >::type RhsNested;
- typedef GeneralProduct<LhsNested, RhsNested, UnrolledProduct> Type;
+ typedef CoeffBasedProduct<LhsNested, RhsNested, NestByRefBit> Type;
};
@@ -411,7 +420,7 @@ template<> struct ei_gemv_selector<OnTheRight,RowMajor,false>
*
* \note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*().
*
- * \sa lazy(), operator*=(const MatrixBase&), Cwise::operator*()
+ * \sa lazyProduct(), operator*=(const MatrixBase&), Cwise::operator*()
*/
template<typename Derived>
template<typename OtherDerived>
@@ -436,4 +445,39 @@ MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
return typename ProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
}
+/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation.
+ *
+ * The returned product will behave like any other expression: the coefficients of the product will be
+ * computed one at a time, as they are requested. This might be useful in some extremely rare cases when only
+ * a small and non-coherent fraction of the result's coefficients has to be computed.
+ *
+ * \warning This version of the matrix product can be much, much slower. So use it only if you know
+ * what you are doing and you have measured a true speed improvement.
+ *
+ * \sa operator*(const MatrixBase&)
+ */
+template<typename Derived>
+template<typename OtherDerived>
+const typename ProductReturnType<Derived,OtherDerived,LazyCoeffBasedProductMode>::Type
+MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
+{
+ enum {
+ ProductIsValid = Derived::ColsAtCompileTime==Dynamic
+ || OtherDerived::RowsAtCompileTime==Dynamic
+ || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
+ AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
+ };
+ // note to the lost user:
+ // * for a dot product use: v1.dot(v2)
+ // * for a coeff-wise product use: v1.cwiseProduct(v2)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+
+ return typename ProductReturnType<Derived,OtherDerived,LazyCoeffBasedProductMode>::Type(derived(), other.derived());
+}
+
#endif // EIGEN_PRODUCT_H
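A usage sketch of the new lazyProduct(): the returned expression exposes plain coefficient access, so asking for a single entry only multiplies one row of the left operand by one column of the right operand, instead of evaluating the full product. This assumes an Eigen checkout with this patch applied:

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 100);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(100, 100);

  // Coefficient-based, non-evaluating product: only row 3 of A and column 7 of B
  // are touched to produce this single coefficient.
  double c37 = A.lazyProduct(B)(3, 7);

  // Reference value through the regular (evaluating) product.
  Eigen::MatrixXd C = A * B;
  std::cout << c37 << " vs " << C(3, 7) << std::endl;
  return 0;
}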
diff --git a/Eigen/src/Core/ProductBase.h b/Eigen/src/Core/ProductBase.h
index 1d19ef72a..481e7c760 100644
--- a/Eigen/src/Core/ProductBase.h
+++ b/Eigen/src/Core/ProductBase.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -42,21 +42,15 @@ struct ei_traits<ProductBase<Derived,_Lhs,_Rhs> > //: ei_traits<typename ei_clea
ColsAtCompileTime = ei_traits<Rhs>::ColsAtCompileTime,
MaxRowsAtCompileTime = ei_traits<Lhs>::MaxRowsAtCompileTime,
MaxColsAtCompileTime = ei_traits<Rhs>::MaxColsAtCompileTime,
- Flags = EvalBeforeNestingBit | EvalBeforeAssigningBit,
+ Flags = EvalBeforeNestingBit | EvalBeforeAssigningBit | NestByRefBit, // Note that EvalBeforeNestingBit and NestByRefBit
+ // are not used in practice because ei_nested is overloaded for products
CoeffReadCost = 0 // FIXME why is it needed ?
};
};
-// enforce evaluation before nesting
-template<typename Derived, typename Lhs, typename Rhs,int N,typename EvalType>
-struct ei_nested<ProductBase<Derived,Lhs,Rhs>, N, EvalType>
-{
- typedef EvalType type;
-};
-
#define EIGEN_PRODUCT_PUBLIC_INTERFACE(Derived) \
typedef ProductBase<Derived, Lhs, Rhs > Base; \
- _EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
+ EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
typedef typename Base::LhsNested LhsNested; \
typedef typename Base::_LhsNested _LhsNested; \
typedef typename Base::LhsBlasTraits LhsBlasTraits; \
@@ -75,8 +69,8 @@ class ProductBase : public MatrixBase<Derived>
{
public:
typedef MatrixBase<Derived> Base;
- _EIGEN_GENERIC_PUBLIC_INTERFACE(ProductBase)
-
+ EIGEN_DENSE_PUBLIC_INTERFACE(ProductBase)
+ protected:
typedef typename Lhs::Nested LhsNested;
typedef typename ei_cleantype<LhsNested>::type _LhsNested;
typedef ei_blas_traits<_LhsNested> LhsBlasTraits;
@@ -89,7 +83,11 @@ class ProductBase : public MatrixBase<Derived>
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename ei_cleantype<ActualRhsType>::type _ActualRhsType;
- using Base::derived;
+ // Diagonal of a product: no need to evaluate the arguments because they are going to be evaluated only once
+ typedef CoeffBasedProduct<LhsNested, RhsNested, 0> FullyLazyCoeffBaseProductType;
+
+ public:
+
typedef typename Base::PlainMatrixType PlainMatrixType;
ProductBase(const Lhs& lhs, const Rhs& rhs)
@@ -115,17 +113,34 @@ class ProductBase : public MatrixBase<Derived>
template<typename Dest>
inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { derived().scaleAndAddTo(dst,alpha); }
- EIGEN_DEPRECATED const Flagged<ProductBase, 0, EvalBeforeAssigningBit> lazy() const
- { return *this; }
-
const _LhsNested& lhs() const { return m_lhs; }
const _RhsNested& rhs() const { return m_rhs; }
+ // Implicit conversion to the nested type (triggers the evaluation of the product)
+ operator const PlainMatrixType& () const
+ {
+ m_result.resize(m_lhs.rows(), m_rhs.cols());
+ this->evalTo(m_result);
+ return m_result;
+ }
+
+ const Diagonal<FullyLazyCoeffBaseProductType,0> diagonal() const
+ { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); }
+
+ template<int Index>
+ const Diagonal<FullyLazyCoeffBaseProductType,Index> diagonal() const
+ { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); }
+
+ const Diagonal<FullyLazyCoeffBaseProductType,Dynamic> diagonal(int index) const
+ { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs).diagonal(index); }
+
protected:
const LhsNested m_lhs;
const RhsNested m_rhs;
+ mutable PlainMatrixType m_result;
+
private:
// discard coeff methods
@@ -135,6 +150,14 @@ class ProductBase : public MatrixBase<Derived>
void coeffRef(int);
};
+// here we need to overload the nested rule for products
+// such that the nested type is a const reference to a plain matrix
+template<typename Lhs, typename Rhs, int Mode, int N, typename PlainMatrixType>
+struct ei_nested<GeneralProduct<Lhs,Rhs,Mode>, N, PlainMatrixType>
+{
+ typedef PlainMatrixType const& type;
+};
+
template<typename NestedProduct>
class ScaledProduct;
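The new diagonal() members on ProductBase route through a fully lazy coefficient-based product, so extracting the diagonal of a large product (or its trace, via the Redux.h change below) evaluates only the n diagonal coefficients rather than the full n-by-n result. A usage sketch, again assuming an Eigen checkout with this patch applied:

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(200, 200);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(200, 200);

  // Only the 200 diagonal coefficients of A*B are evaluated here,
  // not the whole 200x200 product.
  Eigen::VectorXd d = (A * B).diagonal();

  // trace() goes through the same lazy diagonal.
  double t = (A * B).trace();

  std::cout << d.sum() << " vs " << t << std::endl;
  return 0;
}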
diff --git a/Eigen/src/Core/Redux.h b/Eigen/src/Core/Redux.h
index 1643f13b2..d5b0c60c2 100644
--- a/Eigen/src/Core/Redux.h
+++ b/Eigen/src/Core/Redux.h
@@ -49,7 +49,7 @@ private:
MightVectorize = (int(Derived::Flags)&ActualPacketAccessBit)
&& (ei_functor_traits<Func>::PacketAccess),
MayLinearVectorize = MightVectorize && (int(Derived::Flags)&LinearAccessBit),
- MaySliceVectorize = MightVectorize && int(InnerMaxSize)>=3*PacketSize
+ MaySliceVectorize = MightVectorize && int(InnerMaxSize)>=3*PacketSize
};
public:
@@ -58,7 +58,7 @@ public:
: int(MaySliceVectorize) ? int(SliceVectorizedTraversal)
: int(DefaultTraversal)
};
-
+
private:
enum {
Cost = Derived::SizeAtCompileTime * Derived::CoeffReadCost
@@ -123,7 +123,7 @@ struct ei_redux_novec_unroller<Func, Derived, Start, 0>
};
/*** vectorization ***/
-
+
template<typename Func, typename Derived, int Start, int Length>
struct ei_redux_vec_unroller
{
@@ -223,7 +223,7 @@ struct ei_redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
for(int index = alignedStart + packetSize; index < alignedEnd; index += packetSize)
packet_res = func.packetOp(packet_res, mat.template packet<alignment>(index));
res = func.predux(packet_res);
-
+
for(int index = 0; index < alignedStart; ++index)
res = func(res,mat.coeff(index));
@@ -265,7 +265,7 @@ struct ei_redux_impl<Func, Derived, SliceVectorizedTraversal, NoUnrolling>
for(int i=0; i<packetedInnerSize; i+=int(packetSize))
packet_res = func.packetOp(packet_res, mat.template packet<Unaligned>
(isRowMajor?j:i, isRowMajor?i:j));
-
+
res = func.predux(packet_res);
for(int j=0; j<outerSize; ++j)
for(int i=packetedInnerSize; i<innerSize; ++i)
@@ -313,10 +313,9 @@ template<typename Func>
inline typename ei_result_of<Func(typename ei_traits<Derived>::Scalar)>::type
DenseBase<Derived>::redux(const Func& func) const
{
- typename Derived::Nested nested(derived());
typedef typename ei_cleantype<typename Derived::Nested>::type ThisNested;
return ei_redux_impl<Func, ThisNested>
- ::run(nested, func);
+ ::run(derived(), func);
}
/** \returns the minimum of all coefficients of *this
@@ -356,7 +355,7 @@ template<typename Derived>
EIGEN_STRONG_INLINE typename ei_traits<Derived>::Scalar
DenseBase<Derived>::mean() const
{
- return this->redux(Eigen::ei_scalar_sum_op<Scalar>()) / this->size();
+ return Scalar(this->redux(Eigen::ei_scalar_sum_op<Scalar>())) / Scalar(this->size());
}
/** \returns the product of all coefficients of *this
@@ -383,7 +382,7 @@ template<typename Derived>
EIGEN_STRONG_INLINE typename ei_traits<Derived>::Scalar
MatrixBase<Derived>::trace() const
{
- return diagonal().sum();
+ return derived().diagonal().sum();
}
#endif // EIGEN_REDUX_H
diff --git a/Eigen/src/Core/ReturnByValue.h b/Eigen/src/Core/ReturnByValue.h
index e35493236..920269365 100644
--- a/Eigen/src/Core/ReturnByValue.h
+++ b/Eigen/src/Core/ReturnByValue.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009-2010 Gael Guennebaud <g.gael@free.fr>
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
@@ -57,17 +57,31 @@ struct ei_nested<ReturnByValue<Derived>, n, PlainMatrixType>
typedef typename ei_traits<Derived>::ReturnMatrixType type;
};
-template<typename Derived>
- class ReturnByValue : public MatrixBase<ReturnByValue<Derived> >
+template<typename Derived> class ReturnByValue
+ : public ei_traits<Derived>::ReturnMatrixType::template MakeBase<ReturnByValue<Derived> >::Type
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(ReturnByValue)
typedef typename ei_traits<Derived>::ReturnMatrixType ReturnMatrixType;
+ typedef typename ReturnMatrixType::template MakeBase<ReturnByValue<Derived> >::Type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue)
+
template<typename Dest>
inline void evalTo(Dest& dst) const
{ static_cast<const Derived* const>(this)->evalTo(dst); }
inline int rows() const { return static_cast<const Derived* const>(this)->rows(); }
inline int cols() const { return static_cast<const Derived* const>(this)->cols(); }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+#define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT
+ class Unusable{
+ Unusable(const Unusable&) {}
+ Unusable& operator=(const Unusable&) {return *this;}
+ };
+ const Unusable& coeff(int) const { return *reinterpret_cast<const Unusable*>(this); }
+ const Unusable& coeff(int,int) const { return *reinterpret_cast<const Unusable*>(this); }
+ Unusable& coeffRef(int) { return *reinterpret_cast<Unusable*>(this); }
+ Unusable& coeffRef(int,int) { return *reinterpret_cast<Unusable*>(this); }
+#endif
};
template<typename Derived>
diff --git a/Eigen/src/Core/SelfCwiseBinaryOp.h b/Eigen/src/Core/SelfCwiseBinaryOp.h
index df35d0ee9..7ae2e82a4 100644
--- a/Eigen/src/Core/SelfCwiseBinaryOp.h
+++ b/Eigen/src/Core/SelfCwiseBinaryOp.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -47,7 +47,7 @@ template<typename BinaryOp, typename MatrixType> class SelfCwiseBinaryOp
public:
typedef typename MatrixType::template MakeBase< SelfCwiseBinaryOp<BinaryOp, MatrixType> >::Type Base;
- _EIGEN_DENSE_PUBLIC_INTERFACE(SelfCwiseBinaryOp)
+ EIGEN_DENSE_PUBLIC_INTERFACE(SelfCwiseBinaryOp)
typedef typename ei_packet_traits<Scalar>::type Packet;
diff --git a/Eigen/src/Core/SolveTriangular.h b/Eigen/src/Core/SolveTriangular.h
index 73cf77387..cac1e2554 100644
--- a/Eigen/src/Core/SolveTriangular.h
+++ b/Eigen/src/Core/SolveTriangular.h
@@ -254,7 +254,7 @@ void TriangularView<MatrixType,Mode>::solveInPlace(const MatrixBase<OtherDerived
OtherCopy otherCopy(other);
ei_triangular_solver_selector<MatrixType, typename ei_unref<OtherCopy>::type,
- Side, Mode>::run(_expression(), otherCopy);
+ Side, Mode>::run(nestedExpression(), otherCopy);
if (copy)
other = otherCopy;
diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h
index b4d6aa353..70371d8ba 100644
--- a/Eigen/src/Core/StableNorm.h
+++ b/Eigen/src/Core/StableNorm.h
@@ -118,10 +118,10 @@ MatrixBase<Derived>::blueNorm() const
iexp = - ((iemax+it)/2);
s2m = RealScalar(std::pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for upper range
- overfl = rbig*s2m; // overfow boundary for abig
+ overfl = rbig*s2m; // overflow boundary for abig
eps = RealScalar(std::pow(double(ibeta), 1-it));
relerr = ei_sqrt(eps); // tolerance for neglecting asml
- abig = 1.0/eps - 1.0;
+ abig = RealScalar(1.0/eps - 1.0);
if (RealScalar(nbig)>abig) nmax = int(abig); // largest safe n
else nmax = nbig;
}
diff --git a/Eigen/src/Core/Swap.h b/Eigen/src/Core/Swap.h
index 60b6fffc9..186268af0 100644
--- a/Eigen/src/Core/Swap.h
+++ b/Eigen/src/Core/Swap.h
@@ -40,7 +40,7 @@ template<typename ExpressionType> class SwapWrapper
public:
typedef typename ExpressionType::template MakeBase<SwapWrapper<ExpressionType> >::Type Base;
- _EIGEN_DENSE_PUBLIC_INTERFACE(SwapWrapper)
+ EIGEN_DENSE_PUBLIC_INTERFACE(SwapWrapper)
typedef typename ei_packet_traits<Scalar>::type Packet;
inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {}
diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h
index 35b8b2ed3..bd06d8464 100644
--- a/Eigen/src/Core/Transpose.h
+++ b/Eigen/src/Core/Transpose.h
@@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -49,7 +50,7 @@ struct ei_traits<Transpose<MatrixType> > : ei_traits<MatrixType>
ColsAtCompileTime = MatrixType::RowsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- Flags = (int(_MatrixTypeNested::Flags) ^ RowMajorBit),
+ Flags = int(_MatrixTypeNested::Flags & ~NestByRefBit) ^ RowMajorBit,
CoeffReadCost = _MatrixTypeNested::CoeffReadCost
};
};
@@ -71,13 +72,11 @@ template<typename MatrixType> class Transpose
inline int rows() const { return m_matrix.cols(); }
inline int cols() const { return m_matrix.rows(); }
- /** \internal used for introspection */
- const typename ei_cleantype<typename MatrixType::Nested>::type&
- _expression() const { return m_matrix; }
-
+ /** \returns the nested expression */
const typename ei_cleantype<typename MatrixType::Nested>::type&
nestedExpression() const { return m_matrix; }
+ /** \returns the nested expression */
typename ei_cleantype<typename MatrixType::Nested>::type&
nestedExpression() { return m_matrix.const_cast_derived(); }
@@ -89,65 +88,57 @@ template<typename MatrixType> class Transpose
template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
: public MatrixType::template MakeBase<Transpose<MatrixType> >::Type
{
- const typename ei_cleantype<typename MatrixType::Nested>::type& nestedExpression() const
- { return derived().nestedExpression(); }
- typename ei_cleantype<typename MatrixType::Nested>::type& nestedExpression()
- { return derived().nestedExpression(); }
-
public:
- //EIGEN_DENSE_PUBLpename IC_INTERFACE(TransposeImpl,MatrixBase<Transpose<MatrixType> >)
typedef typename MatrixType::template MakeBase<Transpose<MatrixType> >::Type Base;
- _EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
-
-// EIGEN_EXPRESSION_IMPL_COMMON(MatrixBase<Transpose<MatrixType> >)
+ EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
- inline int stride() const { return nestedExpression().stride(); }
- inline Scalar* data() { return nestedExpression().data(); }
- inline const Scalar* data() const { return nestedExpression().data(); }
+ inline int stride() const { return derived().nestedExpression().stride(); }
+ inline Scalar* data() { return derived().nestedExpression().data(); }
+ inline const Scalar* data() const { return derived().nestedExpression().data(); }
inline Scalar& coeffRef(int row, int col)
{
- return nestedExpression().const_cast_derived().coeffRef(col, row);
+ return const_cast_derived().nestedExpression().coeffRef(col, row);
}
inline Scalar& coeffRef(int index)
{
- return nestedExpression().const_cast_derived().coeffRef(index);
+ return const_cast_derived().nestedExpression().coeffRef(index);
}
inline const CoeffReturnType coeff(int row, int col) const
{
- return nestedExpression().coeff(col, row);
+ return derived().nestedExpression().coeff(col, row);
}
inline const CoeffReturnType coeff(int index) const
{
- return nestedExpression().coeff(index);
+ return derived().nestedExpression().coeff(index);
}
template<int LoadMode>
inline const PacketScalar packet(int row, int col) const
{
- return nestedExpression().template packet<LoadMode>(col, row);
+ return derived().nestedExpression().template packet<LoadMode>(col, row);
}
template<int LoadMode>
inline void writePacket(int row, int col, const PacketScalar& x)
{
- nestedExpression().const_cast_derived().template writePacket<LoadMode>(col, row, x);
+ const_cast_derived().nestedExpression().template writePacket<LoadMode>(col, row, x);
}
template<int LoadMode>
inline const PacketScalar packet(int index) const
{
- return nestedExpression().template packet<LoadMode>(index);
+ return derived().nestedExpression().template packet<LoadMode>(index);
}
template<int LoadMode>
inline void writePacket(int index, const PacketScalar& x)
{
- nestedExpression().const_cast_derived().template writePacket<LoadMode>(index, x);
+ const_cast_derived().nestedExpression().template writePacket<LoadMode>(index, x);
}
};
diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h
index e51be366b..8bea0aa68 100644
--- a/Eigen/src/Core/TriangularMatrix.h
+++ b/Eigen/src/Core/TriangularMatrix.h
@@ -204,8 +204,8 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
return m_matrix.const_cast_derived().coeffRef(row, col);
}
- /** \internal */
- const MatrixType& _expression() const { return m_matrix; }
+ const MatrixType& nestedExpression() const { return m_matrix; }
+ MatrixType& nestedExpression() { return const_cast<MatrixType&>(m_matrix); }
/** Assigns a triangular matrix to a triangular part of a dense matrix */
template<typename OtherDerived>
@@ -215,7 +215,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
TriangularView& operator=(const MatrixBase<OtherDerived>& other);
TriangularView& operator=(const TriangularView& other)
- { return *this = other._expression(); }
+ { return *this = other.nestedExpression(); }
template<typename OtherDerived>
void lazyAssign(const TriangularBase<OtherDerived>& other);
@@ -510,15 +510,15 @@ template<typename OtherDerived>
inline TriangularView<MatrixType, Mode>&
TriangularView<MatrixType, Mode>::operator=(const TriangularBase<OtherDerived>& other)
{
- ei_assert(Mode == OtherDerived::Mode);
+ ei_assert(Mode == int(OtherDerived::Mode));
if(ei_traits<OtherDerived>::Flags & EvalBeforeAssigningBit)
{
typename OtherDerived::DenseMatrixType other_evaluated(other.rows(), other.cols());
- other_evaluated.template triangularView<Mode>().lazyAssign(other.derived());
+ other_evaluated.template triangularView<Mode>().lazyAssign(other.derived().nestedExpression());
lazyAssign(other_evaluated);
}
else
- lazyAssign(other.derived());
+ lazyAssign(other.derived().nestedExpression());
return *this;
}
@@ -534,7 +534,7 @@ void TriangularView<MatrixType, Mode>::lazyAssign(const TriangularBase<OtherDeri
<MatrixType, OtherDerived, int(Mode),
unroll ? int(MatrixType::SizeAtCompileTime) : Dynamic,
false // preserve the opposite triangular part
- >::run(m_matrix.const_cast_derived(), other.derived()._expression());
+ >::run(m_matrix.const_cast_derived(), other.derived().nestedExpression());
}
/***************************************************************************
@@ -571,7 +571,7 @@ void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const
<DenseDerived, typename ei_traits<Derived>::ExpressionType, Derived::Mode,
unroll ? int(DenseDerived::SizeAtCompileTime) : Dynamic,
true // clear the opposite triangular part
- >::run(other.derived(), derived()._expression());
+ >::run(other.derived(), derived().nestedExpression());
}
/***************************************************************************
diff --git a/Eigen/src/Core/VectorBlock.h b/Eigen/src/Core/VectorBlock.h
index 6dd8da938..cbf97aeb3 100644
--- a/Eigen/src/Core/VectorBlock.h
+++ b/Eigen/src/Core/VectorBlock.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
@@ -75,7 +75,7 @@ template<typename VectorType, int Size> class VectorBlock
IsColVector = ei_traits<VectorType>::ColsAtCompileTime==1
};
public:
- _EIGEN_GENERIC_PUBLIC_INTERFACE(VectorBlock)
+ EIGEN_DENSE_PUBLIC_INTERFACE(VectorBlock)
using Base::operator=;
diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h
index f65801137..a5a56f759 100644
--- a/Eigen/src/Core/arch/SSE/PacketMath.h
+++ b/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -91,6 +91,10 @@ template<> EIGEN_STRONG_INLINE Packet2d ei_pset1<double>(const double& from) { r
#endif
template<> EIGEN_STRONG_INLINE Packet4i ei_pset1<int>(const int& from) { return _mm_set1_epi32(from); }
+template<> EIGEN_STRONG_INLINE Packet4f ei_plset<float>(const float& a) { return _mm_add_ps(ei_pset1(a), _mm_set_ps(3,2,1,0)); }
+template<> EIGEN_STRONG_INLINE Packet2d ei_plset<double>(const double& a) { return _mm_add_pd(ei_pset1(a),_mm_set_pd(1,0)); }
+template<> EIGEN_STRONG_INLINE Packet4i ei_plset<int>(const int& a) { return _mm_add_epi32(ei_pset1(a),_mm_set_epi32(3,2,1,0)); }
+
template<> EIGEN_STRONG_INLINE Packet4f ei_padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d ei_padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i ei_padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }
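
The new ei_plset specializations build a packet whose lanes are a, a+1, a+2, a+3 (a and a+1 for Packet2d). A scalar reference sketch of that semantics, using a made-up helper name rather than anything from the patch:

// plset_reference is hypothetical; it only mirrors what the SSE intrinsics above compute.
template<typename Scalar, int N>
void plset_reference(const Scalar& a, Scalar (&out)[N])
{
  for (int i = 0; i < N; ++i)
    out[i] = a + Scalar(i);   // N == 4 for Packet4f/Packet4i, N == 2 for Packet2d
}
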
diff --git a/Eigen/src/Core/products/GeneralUnrolled.h b/Eigen/src/Core/products/CoeffBasedProduct.h
index f04c27a95..f030d59b5 100644
--- a/Eigen/src/Core/products/GeneralUnrolled.h
+++ b/Eigen/src/Core/products/CoeffBasedProduct.h
@@ -2,7 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -23,11 +23,14 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_GENERAL_UNROLLED_PRODUCT_H
-#define EIGEN_GENERAL_UNROLLED_PRODUCT_H
+#ifndef EIGEN_COEFFBASED_PRODUCT_H
+#define EIGEN_COEFFBASED_PRODUCT_H
/*********************************************************************************
-* Specialization of GeneralProduct<> for products with small fixed sizes
+* Coefficient based product implementation.
+* It is designed for the following use cases:
+* - small fixed sizes
+* - lazy products
*********************************************************************************/
/* Since all the dimensions of the product are small, here we can rely
@@ -42,8 +45,8 @@ struct ei_product_coeff_impl;
template<int StorageOrder, int Index, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
struct ei_product_packet_impl;
-template<typename LhsNested, typename RhsNested>
-struct ei_traits<GeneralProduct<LhsNested,RhsNested,UnrolledProduct> >
+template<typename LhsNested, typename RhsNested, int NestingFlags>
+struct ei_traits<CoeffBasedProduct<LhsNested,RhsNested,NestingFlags> >
{
typedef DenseStorageMatrix DenseStorageType;
typedef typename ei_cleantype<LhsNested>::type _LhsNested;
@@ -79,8 +82,7 @@ struct ei_traits<GeneralProduct<LhsNested,RhsNested,UnrolledProduct> >
RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
- | EvalBeforeAssigningBit
- | EvalBeforeNestingBit
+ | NestingFlags
| (CanVectorizeLhs || CanVectorizeRhs ? PacketAccessBit : 0)
| (LhsFlags & RhsFlags & AlignedBit),
@@ -98,34 +100,43 @@ struct ei_traits<GeneralProduct<LhsNested,RhsNested,UnrolledProduct> >
};
};
-template<typename LhsNested, typename RhsNested> class GeneralProduct<LhsNested,RhsNested,UnrolledProduct>
+template<typename LhsNested, typename RhsNested, int NestingFlags>
+class CoeffBasedProduct
: ei_no_assignment_operator,
- public MatrixBase<GeneralProduct<LhsNested, RhsNested, UnrolledProduct> >
+ public MatrixBase<CoeffBasedProduct<LhsNested, RhsNested, NestingFlags> >
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(GeneralProduct)
+ typedef MatrixBase<CoeffBasedProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(CoeffBasedProduct)
+ typedef typename Base::PlainMatrixType PlainMatrixType;
private:
- typedef typename ei_traits<GeneralProduct>::_LhsNested _LhsNested;
- typedef typename ei_traits<GeneralProduct>::_RhsNested _RhsNested;
+ typedef typename ei_traits<CoeffBasedProduct>::_LhsNested _LhsNested;
+ typedef typename ei_traits<CoeffBasedProduct>::_RhsNested _RhsNested;
enum {
PacketSize = ei_packet_traits<Scalar>::size,
- InnerSize = ei_traits<GeneralProduct>::InnerSize,
+ InnerSize = ei_traits<CoeffBasedProduct>::InnerSize,
Unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT,
- CanVectorizeInner = ei_traits<GeneralProduct>::CanVectorizeInner
+ CanVectorizeInner = ei_traits<CoeffBasedProduct>::CanVectorizeInner
};
typedef ei_product_coeff_impl<CanVectorizeInner ? InnerVectorizedTraversal : DefaultTraversal,
Unroll ? InnerSize-1 : Dynamic,
_LhsNested, _RhsNested, Scalar> ScalarCoeffImpl;
+ typedef CoeffBasedProduct<LhsNested,RhsNested,NestByRefBit> LazyCoeffBasedProductType;
+
public:
+ inline CoeffBasedProduct(const CoeffBasedProduct& other)
+ : Base(), m_lhs(other.m_lhs), m_rhs(other.m_rhs)
+ {}
+
template<typename Lhs, typename Rhs>
- inline GeneralProduct(const Lhs& lhs, const Rhs& rhs)
+ inline CoeffBasedProduct(const Lhs& lhs, const Rhs& rhs)
: m_lhs(lhs), m_rhs(rhs)
{
// we don't allow taking products of matrices of different real types, as that wouldn't be vectorizable.
@@ -170,11 +181,40 @@ template<typename LhsNested, typename RhsNested> class GeneralProduct<LhsNested,
return res;
}
+  // Implicit conversion to the nested type (triggers the evaluation of the product)
+ operator const PlainMatrixType& () const
+ {
+ m_result.lazyAssign(*this);
+ return m_result;
+ }
+
+ const _LhsNested& lhs() const { return m_lhs; }
+ const _RhsNested& rhs() const { return m_rhs; }
+
+ const Diagonal<LazyCoeffBasedProductType,0> diagonal() const
+ { return reinterpret_cast<const LazyCoeffBasedProductType&>(*this); }
+
+ template<int Index>
+ const Diagonal<LazyCoeffBasedProductType,Index> diagonal() const
+ { return reinterpret_cast<const LazyCoeffBasedProductType&>(*this); }
+
+ const Diagonal<LazyCoeffBasedProductType,Dynamic> diagonal(int index) const
+ { return reinterpret_cast<const LazyCoeffBasedProductType&>(*this).diagonal(index); }
+
protected:
const LhsNested m_lhs;
const RhsNested m_rhs;
+
+ mutable PlainMatrixType m_result;
};
+// here we need to overload the nested rule for products
+// such that the nested type is a const reference to a plain matrix
+template<typename Lhs, typename Rhs, int N, typename PlainMatrixType>
+struct ei_nested<CoeffBasedProduct<Lhs,Rhs,EvalBeforeNestingBit|EvalBeforeAssigningBit>, N, PlainMatrixType>
+{
+ typedef PlainMatrixType const& type;
+};
/***************************************************************************
* Normal product .coeff() implementation (with meta-unrolling)
@@ -385,4 +425,4 @@ struct ei_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, PacketScalar, LoadMod
}
};
-#endif // EIGEN_GENERAL_UNROLLED_PRODUCT_H
+#endif // EIGEN_COEFFBASED_PRODUCT_H
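
From the user side, the two use cases named in the header comment look roughly like this (a sketch assuming the usual dense API and <Eigen/Dense>; not part of the patch):

Eigen::Matrix3f a = Eigen::Matrix3f::Random();
Eigen::Matrix3f b = Eigen::Matrix3f::Random();
Eigen::Matrix3f c = a * b;              // small fixed size: coefficient-based product,
                                        // evaluated through the conversion operator added above
Eigen::Vector3f d = (a * b).diagonal(); // goes through the diagonal() members added above
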
diff --git a/Eigen/src/Core/util/BlasUtil.h b/Eigen/src/Core/util/BlasUtil.h
index 916a125e3..3777464dc 100644
--- a/Eigen/src/Core/util/BlasUtil.h
+++ b/Eigen/src/Core/util/BlasUtil.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -185,8 +185,8 @@ struct ei_blas_traits<CwiseUnaryOp<ei_scalar_conjugate_op<Scalar>, NestedXpr> >
IsComplex = NumTraits<Scalar>::IsComplex,
NeedToConjugate = Base::NeedToConjugate ? 0 : IsComplex
};
- static inline ExtractType extract(const XprType& x) { return Base::extract(x._expression()); }
- static inline Scalar extractScalarFactor(const XprType& x) { return ei_conj(Base::extractScalarFactor(x._expression())); }
+ static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+ static inline Scalar extractScalarFactor(const XprType& x) { return ei_conj(Base::extractScalarFactor(x.nestedExpression())); }
};
// pop scalar multiple
@@ -197,9 +197,9 @@ struct ei_blas_traits<CwiseUnaryOp<ei_scalar_multiple_op<Scalar>, NestedXpr> >
typedef ei_blas_traits<NestedXpr> Base;
typedef CwiseUnaryOp<ei_scalar_multiple_op<Scalar>, NestedXpr> XprType;
typedef typename Base::ExtractType ExtractType;
- static inline ExtractType extract(const XprType& x) { return Base::extract(x._expression()); }
+ static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
static inline Scalar extractScalarFactor(const XprType& x)
- { return x._functor().m_other * Base::extractScalarFactor(x._expression()); }
+ { return x.functor().m_other * Base::extractScalarFactor(x.nestedExpression()); }
};
// pop opposite
@@ -210,9 +210,9 @@ struct ei_blas_traits<CwiseUnaryOp<ei_scalar_opposite_op<Scalar>, NestedXpr> >
typedef ei_blas_traits<NestedXpr> Base;
typedef CwiseUnaryOp<ei_scalar_opposite_op<Scalar>, NestedXpr> XprType;
typedef typename Base::ExtractType ExtractType;
- static inline ExtractType extract(const XprType& x) { return Base::extract(x._expression()); }
+ static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
static inline Scalar extractScalarFactor(const XprType& x)
- { return - Base::extractScalarFactor(x._expression()); }
+ { return - Base::extractScalarFactor(x.nestedExpression()); }
};
// pop/push transpose
@@ -232,8 +232,8 @@ struct ei_blas_traits<Transpose<NestedXpr> >
enum {
IsTransposed = Base::IsTransposed ? 0 : 1
};
- static inline const ExtractType extract(const XprType& x) { return Base::extract(x._expression()); }
- static inline Scalar extractScalarFactor(const XprType& x) { return Base::extractScalarFactor(x._expression()); }
+ static inline const ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+ static inline Scalar extractScalarFactor(const XprType& x) { return Base::extractScalarFactor(x.nestedExpression()); }
};
#endif // EIGEN_BLASUTIL_H
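
These "pop" specializations are what let Eigen peel scalar factors, conjugations and transpositions off an expression before dispatching a single product kernel. A hedged sketch of the kind of expression they serve (assuming <Eigen/Dense> and complex matrices):

Eigen::MatrixXcf A(8,8), B(8,8), C(8,8);
A.setRandom(); B.setRandom();
std::complex<float> s(2.0f, 1.0f);
// ei_blas_traits extracts the bare matrix A from s * A.adjoint() together with
// the accumulated scalar factor, so one GEMM-like kernel can run on plain data.
C.noalias() = (s * A.adjoint()) * B;
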
diff --git a/Eigen/src/Core/util/Constants.h b/Eigen/src/Core/util/Constants.h
index 01c1aeb2f..51590b03d 100644
--- a/Eigen/src/Core/util/Constants.h
+++ b/Eigen/src/Core/util/Constants.h
@@ -35,7 +35,7 @@
* - It should be smaller than the sqrt of INT_MAX. Indeed, we often multiply a number of rows with a number
* of columns in order to compute a number of coefficients. Even if we guard that with an "if" checking whether
* the values are Dynamic, we still get a compiler warning "integer overflow". So the only way to get around
- * it would be a meta-selector. Doing this everywhere would reduce code readability and lenghten compilation times.
+ * it would be a meta-selector. Doing this everywhere would reduce code readability and lengthen compilation times.
* Also, disabling compiler warnings for integer overflow, sounds like a bad idea.
* - It should be a prime number, because for example the old value 10000 led to bugs with 100x100 matrices.
*
@@ -76,7 +76,7 @@ const unsigned int EvalBeforeNestingBit = 0x2;
/** \ingroup flags
*
- * means the expression should be evaluated before any assignement */
+ * means the expression should be evaluated before any assignment */
const unsigned int EvalBeforeAssigningBit = 0x4;
/** \ingroup flags
@@ -97,6 +97,8 @@ const unsigned int EvalBeforeAssigningBit = 0x4;
*/
const unsigned int PacketAccessBit = 0x8;
+const unsigned int NestByRefBit = 0x100;
+
#ifdef EIGEN_VECTORIZE
/** \ingroup flags
*
@@ -224,6 +226,11 @@ namespace {
EIGEN_UNUSED NoChange_t NoChange;
}
+struct Sequential_t {};
+namespace {
+ EIGEN_UNUSED Sequential_t Sequential;
+}
+
struct Default_t {};
namespace {
EIGEN_UNUSED Default_t Default;
@@ -262,4 +269,6 @@ namespace Architecture
enum DenseStorageMatrix {};
enum DenseStorageArray {};
+enum { CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct };
+
#endif // EIGEN_CONSTANTS_H
diff --git a/Eigen/src/Core/util/ForwardDeclarations.h b/Eigen/src/Core/util/ForwardDeclarations.h
index b6bba04e6..aed0abe6d 100644
--- a/Eigen/src/Core/util/ForwardDeclarations.h
+++ b/Eigen/src/Core/util/ForwardDeclarations.h
@@ -49,8 +49,10 @@ template<typename NullaryOp, typename MatrixType> class CwiseNullaryOp;
template<typename UnaryOp, typename MatrixType> class CwiseUnaryOp;
template<typename ViewOp, typename MatrixType> class CwiseUnaryView;
template<typename BinaryOp, typename Lhs, typename Rhs> class CwiseBinaryOp;
-template<typename BinOp, typename MatrixType> class SelfCwiseBinaryOp;
+template<typename BinOp, typename MatrixType> class SelfCwiseBinaryOp;
template<typename Derived, typename Lhs, typename Rhs> class ProductBase;
+template<typename Lhs, typename Rhs, int Mode> class GeneralProduct;
+template<typename Lhs, typename Rhs, int NestingFlags> class CoeffBasedProduct;
template<typename Derived> class DiagonalBase;
template<typename _DiagonalVectorType> class DiagonalWrapper;
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index 6e31f1580..dc1aa150b 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
@@ -281,49 +281,29 @@ using Eigen::ei_cos;
* Just a side note. Commenting within defines works only by documenting
* behind the object (via '!<'). Comments cannot be multi-line and thus
* we have these extra long lines. What is confusing doxygen over here is
-* that we use '\' and basically have a bunch of typedefs with their
+* that we use '\' and basically have a bunch of typedefs with their
* documentation in a single line.
**/
-#define _EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
+#define EIGEN_GENERIC_PUBLIC_INTERFACE_NEW(Derived) \
typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T would correspond to RealScalar. */ \
- typedef typename Base::PacketScalar PacketScalar; \
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
typedef typename Eigen::ei_nested<Derived>::type Nested; \
enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
- MaxRowsAtCompileTime = Eigen::ei_traits<Derived>::MaxRowsAtCompileTime, \
- MaxColsAtCompileTime = Eigen::ei_traits<Derived>::MaxColsAtCompileTime, \
Flags = Eigen::ei_traits<Derived>::Flags, \
CoeffReadCost = Eigen::ei_traits<Derived>::CoeffReadCost, \
SizeAtCompileTime = Base::SizeAtCompileTime, \
MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
-#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
- typedef Eigen::MatrixBase<Derived> Base; \
- _EIGEN_GENERIC_PUBLIC_INTERFACE(Derived)
-#define EIGEN_GENERIC_PUBLIC_INTERFACE_NEW(Derived) \
- typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; \
- typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
- typedef typename Base::CoeffReturnType CoeffReturnType; \
- typedef typename Eigen::ei_nested<Derived>::type Nested; \
- enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
- ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
- Flags = Eigen::ei_traits<Derived>::Flags, \
- CoeffReadCost = Eigen::ei_traits<Derived>::CoeffReadCost, \
- SizeAtCompileTime = Base::SizeAtCompileTime, \
- MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
- IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
-
-
-#define _EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
- typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; \
- typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
+#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
+ typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
+  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T would correspond to RealScalar. */ \
typedef typename Base::PacketScalar PacketScalar; \
- typedef typename Base::CoeffReturnType CoeffReturnType; \
+ typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
typedef typename Eigen::ei_nested<Derived>::type Nested; \
enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
@@ -334,11 +314,8 @@ using Eigen::ei_cos;
SizeAtCompileTime = Base::SizeAtCompileTime, \
MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
- using Base::derived;
-
-#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
- typedef Eigen::MatrixBase<Derived> Base; \
- _EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
+ using Base::derived; \
+ using Base::const_cast_derived;
#define EIGEN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)
diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h
index bfc6ff686..d4920d213 100644
--- a/Eigen/src/Core/util/Memory.h
+++ b/Eigen/src/Core/util/Memory.h
@@ -61,7 +61,7 @@
*/
inline void* ei_handmade_aligned_malloc(size_t size)
{
- void *original = malloc(size+16);
+ void *original = std::malloc(size+16);
void *aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(original) & ~(size_t(15))) + 16);
*(reinterpret_cast<void**>(aligned) - 1) = original;
return aligned;
@@ -71,7 +71,7 @@ inline void* ei_handmade_aligned_malloc(size_t size)
inline void ei_handmade_aligned_free(void *ptr)
{
if(ptr)
- free(*(reinterpret_cast<void**>(ptr) - 1));
+ std::free(*(reinterpret_cast<void**>(ptr) - 1));
}
/** \internal allocates \a size bytes. The returned pointer is guaranteed to have 16 bytes alignment.
@@ -119,7 +119,7 @@ template<> inline void* ei_conditional_aligned_malloc<false>(size_t size)
ei_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
#endif
- void *result = malloc(size);
+ void *result = std::malloc(size);
#ifdef EIGEN_EXCEPTIONS
if(!result) throw std::bad_alloc();
#endif
@@ -179,7 +179,7 @@ template<bool Align> inline void ei_conditional_aligned_free(void *ptr)
template<> inline void ei_conditional_aligned_free<false>(void *ptr)
{
- free(ptr);
+ std::free(ptr);
}
/** \internal destruct the elements of an array.
@@ -341,7 +341,7 @@ class aligned_allocator
{
public:
typedef size_t size_type;
- typedef ptrdiff_t difference_type;
+ typedef std::ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
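
The handmade path above over-allocates by 16 bytes and stashes the original pointer just below the returned aligned address, which is what the free side reads back. A small sketch of that bookkeeping (ei_handmade_aligned_malloc/free are Eigen internals, used here only for illustration):

#include <Eigen/Core>
#include <cassert>
#include <cstddef>

void handmade_alignment_demo()   // hypothetical demo function
{
  void* p = Eigen::ei_handmade_aligned_malloc(64);
  assert((reinterpret_cast<std::size_t>(p) & std::size_t(15)) == 0); // 16-byte aligned
  Eigen::ei_handmade_aligned_free(p);  // recovers the stashed pointer, frees the original block
}
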
diff --git a/Eigen/src/Core/util/StaticAssert.h b/Eigen/src/Core/util/StaticAssert.h
index 56a57b706..619e7664d 100644
--- a/Eigen/src/Core/util/StaticAssert.h
+++ b/Eigen/src/Core/util/StaticAssert.h
@@ -61,6 +61,7 @@
THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE,
THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE,
YOU_MADE_A_PROGRAMMING_MISTAKE,
+ EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE,
YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR,
YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR,
UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC,
diff --git a/Eigen/src/Core/util/XprHelper.h b/Eigen/src/Core/util/XprHelper.h
index 6b5387c99..a86e7be89 100644
--- a/Eigen/src/Core/util/XprHelper.h
+++ b/Eigen/src/Core/util/XprHelper.h
@@ -97,7 +97,7 @@ class ei_compute_matrix_flags
};
public:
- enum { ret = LinearAccessBit | DirectAccessBit | packet_access_bit | row_major_bit | aligned_bit };
+ enum { ret = LinearAccessBit | DirectAccessBit | NestByRefBit | packet_access_bit | row_major_bit | aligned_bit };
};
template<int _Rows, int _Cols> struct ei_size_at_compile_time
@@ -142,7 +142,7 @@ template<typename T> struct ei_plain_matrix_type_dense<T,DenseStorageArray>
* in order to avoid a useless copy
*/
-template<typename T, typename StorageType = typename ei_traits<T>::StorageType> class ei_eval;
+template<typename T, typename StorageType = typename ei_traits<T>::StorageType> struct ei_eval;
template<typename T> struct ei_eval<T,Dense>
{
@@ -209,23 +209,11 @@ template<typename T> struct ei_must_nest_by_value { enum { ret = false }; };
template <typename T>
struct ei_ref_selector
{
- typedef T type;
-};
-
-/**
-* Matrices on the other hand side should only be copied, when it is sure
-* we gain by copying (see arithmetic cost check and eval before nesting flag).
-* Note: This is an optimization measure that comprises potential (though little)
-* to create erroneous code. Any user, utilizing ei_nested outside of
-* Eigen needs to take care that no references to temporaries are
-* stored or that this potential danger is at least communicated
-* to the user.
-**/
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-struct ei_ref_selector< Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
-{
- typedef Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> MatrixType;
- typedef MatrixType const& type;
+ typedef typename ei_meta_if<
+ bool(ei_traits<T>::Flags & NestByRefBit),
+ T const&,
+ T
+ >::ret type;
};
/** \internal Determines how a given expression should be nested into another one.
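
The net effect of routing ei_ref_selector through NestByRefBit: heavy objects that set the bit (plain dense matrices via ei_compute_matrix_flags above, DynamicSparseMatrix further down) are nested by const reference, while lightweight expressions are copied by value. In comment form, since these are internal metaprogramming types of this development line:

// ei_ref_selector<Matrix3f>::type
//   -> Matrix3f const&                 (Matrix sets NestByRefBit: nest by reference)
// ei_ref_selector< CwiseUnaryOp<Op,Xpr> >::type
//   -> CwiseUnaryOp<Op,Xpr>            (bit not set: the expression is copied by value)
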
diff --git a/Eigen/src/Eigen2Support/Lazy.h b/Eigen/src/Eigen2Support/Lazy.h
index e69de29bb..c4288ede2 100644
--- a/Eigen/src/Eigen2Support/Lazy.h
+++ b/Eigen/src/Eigen2Support/Lazy.h
@@ -0,0 +1,82 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_LAZY_H
+#define EIGEN_LAZY_H
+
+/** \deprecated it is only used by lazy() which is deprecated
+ *
+ * \returns an expression of *this with added flags
+ *
+ * Example: \include MatrixBase_marked.cpp
+ * Output: \verbinclude MatrixBase_marked.out
+ *
+ * \sa class Flagged, extract(), part()
+ */
+template<typename Derived>
+template<unsigned int Added>
+inline const Flagged<Derived, Added, 0>
+MatrixBase<Derived>::marked() const
+{
+ return derived();
+}
+
+/** \deprecated use MatrixBase::noalias()
+ *
+ * \returns an expression of *this with the EvalBeforeAssigningBit flag removed.
+ *
+ * Example: \include MatrixBase_lazy.cpp
+ * Output: \verbinclude MatrixBase_lazy.out
+ *
+ * \sa class Flagged, marked()
+ */
+template<typename Derived>
+inline const Flagged<Derived, 0, EvalBeforeAssigningBit>
+MatrixBase<Derived>::lazy() const
+{
+ return derived();
+}
+
+
+/** \internal
+ * Overloaded to perform an efficient C += (A*B).lazy() */
+template<typename Derived>
+template<typename ProductDerived, typename Lhs, typename Rhs>
+Derived& MatrixBase<Derived>::operator+=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
+ EvalBeforeAssigningBit>& other)
+{
+ other._expression().derived().addTo(derived()); return derived();
+}
+
+/** \internal
+ * Overloaded to perform an efficient C -= (A*B).lazy() */
+template<typename Derived>
+template<typename ProductDerived, typename Lhs, typename Rhs>
+Derived& MatrixBase<Derived>::operator-=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
+ EvalBeforeAssigningBit>& other)
+{
+ other._expression().derived().subTo(derived()); return derived();
+}
+
+#endif // EIGEN_LAZY_H
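
In terms of usage, the overloads above keep the Eigen2 idiom compiling while noalias() is the spelling the deprecation notes point to. A sketch assuming float matrices and <Eigen/Dense>:

Eigen::MatrixXf A(16,16), B(16,16), C(16,16);
A.setRandom(); B.setRandom(); C.setZero();
// Eigen2 idiom targeted by the operator+= overload above (Eigen2 support layer):
//   C += (A * B).lazy();
// Eigen3 spelling recommended by the deprecation notes, with the same intent:
C.noalias() += A * B;
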
diff --git a/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
index d55dc2a96..27c52b4dc 100644
--- a/Eigen/src/Eigenvalues/ComplexEigenSolver.h
+++ b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
@@ -94,7 +94,7 @@ void ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix)
m_eivalues.resize(n,1);
m_eivec.resize(n,n);
- RealScalar eps = epsilon<RealScalar>();
+ RealScalar eps = NumTraits<RealScalar>::epsilon();
// Reduce to complex Schur form
ComplexSchur<MatrixType> schur(matrix);
diff --git a/Eigen/src/Eigenvalues/ComplexSchur.h b/Eigen/src/Eigenvalues/ComplexSchur.h
index 38a1f56dc..5deac3247 100644
--- a/Eigen/src/Eigenvalues/ComplexSchur.h
+++ b/Eigen/src/Eigenvalues/ComplexSchur.h
@@ -159,7 +159,7 @@ void ComplexSchur<MatrixType>::compute(const MatrixType& matrix, bool skipU)
RealScalar d,sd,sf;
Complex c,b,disc,r1,r2,kappa;
- RealScalar eps = epsilon<RealScalar>();
+ RealScalar eps = NumTraits<RealScalar>::epsilon();
int iter = 0;
while(true)
diff --git a/Eigen/src/Geometry/AlignedBox.h b/Eigen/src/Geometry/AlignedBox.h
index 1865b179b..b5b40a82d 100644
--- a/Eigen/src/Geometry/AlignedBox.h
+++ b/Eigen/src/Geometry/AlignedBox.h
@@ -43,37 +43,70 @@ class AlignedBox
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
enum { AmbientDimAtCompileTime = _AmbientDim };
- typedef _Scalar Scalar;
- typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
+ typedef _Scalar Scalar;
+ typedef NumTraits<Scalar> ScalarTraits;
+ typedef typename ScalarTraits::Real RealScalar;
+ typedef typename ScalarTraits::FloatingPoint FloatingPoint;
+ typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
+
+ /** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */
+ enum CornerType
+ {
+ /** 1D names */
+ Min=0, Max=1,
+
+ /** Added names for 2D */
+ BottomLeft=0, BottomRight=1,
+ TopLeft=2, TopRight=3,
+
+ /** Added names for 3D */
+ BottomLeftFloor=0, BottomRightFloor=1,
+ TopLeftFloor=2, TopRightFloor=3,
+ BottomLeftCeil=4, BottomRightCeil=5,
+ TopLeftCeil=6, TopRightCeil=7
+ };
+
/** Default constructor initializing a null box. */
inline explicit AlignedBox()
- { if (AmbientDimAtCompileTime!=Dynamic) setNull(); }
+ { if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); }
/** Constructs a null box with \a _dim the dimension of the ambient space. */
inline explicit AlignedBox(int _dim) : m_min(_dim), m_max(_dim)
- { setNull(); }
+ { setEmpty(); }
/** Constructs a box with extremities \a _min and \a _max. */
- inline AlignedBox(const VectorType& _min, const VectorType& _max) : m_min(_min), m_max(_max) {}
+ template<typename OtherVectorType1, typename OtherVectorType2>
+ inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}
/** Constructs a box containing a single point \a p. */
- inline explicit AlignedBox(const VectorType& p) : m_min(p), m_max(p) {}
+ template<typename Derived>
+ inline explicit AlignedBox(const MatrixBase<Derived>& a_p)
+ {
+ const typename ei_nested<Derived,2>::type p(a_p.derived());
+ m_min = p;
+ m_max = p;
+ }
~AlignedBox() {}
/** \returns the dimension in which the box holds */
inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : AmbientDimAtCompileTime; }
- /** \returns true if the box is null, i.e, empty. */
- inline bool isNull() const { return (m_min.array() > m_max.array()).any(); }
+ /** \deprecated use isEmpty */
+ inline bool isNull() const { return isEmpty(); }
+
+ /** \deprecated use setEmpty */
+ inline void setNull() { setEmpty(); }
+
+ /** \returns true if the box is empty. */
+ inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }
- /** Makes \c *this a null/empty box. */
- inline void setNull()
+ /** Makes \c *this an empty box. */
+ inline void setEmpty()
{
- m_min.setConstant( std::numeric_limits<Scalar>::max());
- m_max.setConstant(-std::numeric_limits<Scalar>::max());
+ m_min.setConstant( ScalarTraits::highest() );
+ m_max.setConstant( ScalarTraits::lowest() );
}
/** \returns the minimal corner */
@@ -86,45 +119,135 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
inline VectorType& max() { return m_max; }
/** \returns the center of the box */
- inline VectorType center() const { return (m_min + m_max) / 2; }
+ inline const CwiseUnaryOp<ei_scalar_quotient1_op<Scalar>,
+ CwiseBinaryOp<ei_scalar_sum_op<Scalar>, VectorType, VectorType> >
+ center() const
+ { return (m_min+m_max)/2; }
+
+ /** \returns the lengths of the sides of the bounding box.
+    * Note that this function does not give the same
+    * result for integral and floating-point scalar types.
+ */
+ inline const CwiseBinaryOp< ei_scalar_difference_op<Scalar>, VectorType, VectorType> sizes() const
+ { return m_max - m_min; }
+
+ /** \returns the volume of the bounding box */
+ inline Scalar volume() const
+ { return sizes().prod(); }
+
+ /** \returns an expression for the bounding box diagonal vector
+ * if the length of the diagonal is needed: diagonal().norm()
+ * will provide it.
+ */
+ inline CwiseBinaryOp< ei_scalar_difference_op<Scalar>, VectorType, VectorType> diagonal() const
+ { return sizes(); }
+
+ /** \returns the vertex of the bounding box at the corner defined by
+ * the corner-id corner. It works only for a 1D, 2D or 3D bounding box.
+    * For 1D bounding boxes, corners are named by 2 enum constants:
+    * Min and Max.
+ * For 2D bounding boxes, corners are named by 4 enum constants:
+ * BottomLeft, BottomRight, TopLeft, TopRight.
+ * For 3D bounding boxes, the following names are added:
+ * BottomLeftCeil, BottomRightCeil, TopLeftCeil, TopRightCeil.
+ */
+ inline VectorType corner(CornerType corner) const
+ {
+ EIGEN_STATIC_ASSERT(_AmbientDim <= 3, THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE);
+
+ VectorType res;
+
+ int mult = 1;
+ for(int d=0; d<dim(); ++d)
+ {
+ if( mult & corner ) res[d] = m_max[d];
+ else res[d] = m_min[d];
+ mult *= 2;
+ }
+ return res;
+ }
+
+ /** \returns a random point inside the bounding box sampled with
+ * a uniform distribution */
+ inline VectorType sample() const
+ {
+ VectorType r;
+ for(int d=0; d<dim(); ++d)
+ {
+ if(ScalarTraits::HasFloatingPoint)
+ {
+ r[d] = m_min[d] + (m_max[d]-m_min[d])
+ * (ei_random<Scalar>() + ei_random_amplitude<Scalar>())
+ / (Scalar(2)*ei_random_amplitude<Scalar>() );
+ }
+ else
+ r[d] = ei_random(m_min[d], m_max[d]);
+ }
+ return r;
+ }
/** \returns true if the point \a p is inside the box \c *this. */
- inline bool contains(const VectorType& p) const
- { return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all(); }
+ template<typename Derived>
+ inline bool contains(const MatrixBase<Derived>& a_p) const
+ {
+ const typename ei_nested<Derived,2>::type p(a_p.derived());
+ return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all();
+ }
/** \returns true if the box \a b is entirely inside the box \c *this. */
inline bool contains(const AlignedBox& b) const
{ return (m_min.array()<=b.min().array()).all() && (b.max().array()<=m_max.array()).all(); }
/** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
- inline AlignedBox& extend(const VectorType& p)
- { m_min = m_min.cwiseMin(p); m_max = m_max.cwiseMax(p); return *this; }
+ template<typename Derived>
+ inline AlignedBox& extend(const MatrixBase<Derived>& a_p)
+ {
+ const typename ei_nested<Derived,2>::type p(a_p.derived());
+ m_min = m_min.cwiseMin(p);
+ m_max = m_max.cwiseMax(p);
+ return *this;
+ }
/** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
inline AlignedBox& extend(const AlignedBox& b)
- { m_min = m_min.cwiseMin(b.m_min); m_max = m_max.cwiseMax(b.m_max); return *this; }
+ {
+ m_min = m_min.cwiseMin(b.m_min);
+ m_max = m_max.cwiseMax(b.m_max);
+ return *this;
+ }
/** Clamps \c *this by the box \a b and returns a reference to \c *this. */
inline AlignedBox& clamp(const AlignedBox& b)
- { m_min = m_min.cwiseMax(b.m_min); m_max = m_max.cwiseMin(b.m_max); return *this; }
+ {
+ m_min = m_min.cwiseMax(b.m_min);
+ m_max = m_max.cwiseMin(b.m_max);
+ return *this;
+ }
/** Returns an AlignedBox that is the intersection of \a b and \c *this */
- inline AlignedBox intersection(const AlignedBox &b) const
- { return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
+ inline AlignedBox intersection(const AlignedBox& b) const
+ {return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
/** Returns an AlignedBox that is the union of \a b and \c *this */
- inline AlignedBox merged(const AlignedBox &b) const
+ inline AlignedBox merged(const AlignedBox& b) const
{ return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }
/** Translate \c *this by the vector \a t and returns a reference to \c *this. */
- inline AlignedBox& translate(const VectorType& t)
- { m_min += t; m_max += t; return *this; }
+ template<typename Derived>
+ inline AlignedBox& translate(const MatrixBase<Derived>& a_t)
+ {
+ const typename ei_nested<Derived,2>::type t(a_t.derived());
+ m_min += t;
+ m_max += t;
+ return *this;
+ }
/** \returns the squared distance between the point \a p and the box \c *this,
* and zero if \a p is inside the box.
* \sa exteriorDistance()
*/
- inline Scalar squaredExteriorDistance(const VectorType& p) const;
+ template<typename Derived>
+ inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& a_p) const;
/** \returns the squared distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
@@ -136,15 +259,16 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
* and zero if \a p is inside the box.
* \sa squaredExteriorDistance()
*/
- inline Scalar exteriorDistance(const VectorType& p) const
- { return ei_sqrt(squaredExteriorDistance(p)); }
+ template<typename Derived>
+ inline FloatingPoint exteriorDistance(const MatrixBase<Derived>& p) const
+ { return ei_sqrt(FloatingPoint(squaredExteriorDistance(p))); }
/** \returns the distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
* \sa squaredExteriorDistance()
*/
- inline Scalar exteriorDistance(const AlignedBox& b) const
- { return ei_sqrt(squaredExteriorDistance(b)); }
+ inline FloatingPoint exteriorDistance(const AlignedBox& b) const
+ { return ei_sqrt(FloatingPoint(squaredExteriorDistance(b))); }
/** \returns \c *this with scalar type casted to \a NewScalarType
*
@@ -171,7 +295,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
- bool isApprox(const AlignedBox& other, typename NumTraits<Scalar>::Real prec = dummy_precision<Scalar>()) const
+ bool isApprox(const AlignedBox& other, RealScalar prec = ScalarTraits::dummy_precision()) const
{ return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); }
protected:
@@ -179,32 +303,48 @@ protected:
VectorType m_min, m_max;
};
-template<typename Scalar,int AmbiantDim>
-inline Scalar AlignedBox<Scalar,AmbiantDim>::squaredExteriorDistance(const VectorType& p) const
+
+
+template<typename Scalar,int AmbientDim>
+template<typename Derived>
+inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const MatrixBase<Derived>& a_p) const
{
+ const typename ei_nested<Derived,2*AmbientDim>::type p(a_p.derived());
Scalar dist2 = 0.;
Scalar aux;
for (int k=0; k<dim(); ++k)
{
- if ((aux = (p[k]-m_min[k]))<Scalar(0))
+ if( m_min[k] > p[k] )
+ {
+ aux = m_min[k] - p[k];
dist2 += aux*aux;
- else if ( (aux = (m_max[k]-p[k]))<Scalar(0) )
+ }
+ else if( p[k] > m_max[k] )
+ {
+ aux = p[k] - m_max[k];
dist2 += aux*aux;
+ }
}
return dist2;
}
-template<typename Scalar,int AmbiantDim>
-inline Scalar AlignedBox<Scalar,AmbiantDim>::squaredExteriorDistance(const AlignedBox& b) const
+template<typename Scalar,int AmbientDim>
+inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const AlignedBox& b) const
{
Scalar dist2 = 0.;
Scalar aux;
for (int k=0; k<dim(); ++k)
{
- if ((aux = (b.m_min[k]-m_max[k]))>0.)
+ if( m_min[k] > b.m_max[k] )
+ {
+ aux = m_min[k] - b.m_max[k];
dist2 += aux*aux;
- else if ( (aux = (m_min[k]-b.m_max[k]))>0. )
+ }
+ else if( b.m_min[k] > m_max[k] )
+ {
+ aux = b.m_min[k] - m_max[k];
dist2 += aux*aux;
+ }
}
return dist2;
}
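
Putting the reworked AlignedBox API together, a short sketch (assuming <Eigen/Geometry>; corner() and the templated extend()/exteriorDistance() are the members introduced above):

Eigen::AlignedBox<float,3> box;                 // default-constructed boxes start empty
box.extend(Eigen::Vector3f(0.f, 0.f, 0.f));
box.extend(Eigen::Vector3f(1.f, 2.f, 3.f));
Eigen::Vector3f top = box.corner(Eigen::AlignedBox<float,3>::TopRightCeil); // (1,2,3)
float d = box.exteriorDistance(Eigen::Vector3f(1.f, 2.f, 5.f));             // 2
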
diff --git a/Eigen/src/Geometry/AngleAxis.h b/Eigen/src/Geometry/AngleAxis.h
index da698bbfe..d02ecf4c5 100644
--- a/Eigen/src/Geometry/AngleAxis.h
+++ b/Eigen/src/Geometry/AngleAxis.h
@@ -146,7 +146,7 @@ public:
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
- bool isApprox(const AngleAxis& other, typename NumTraits<Scalar>::Real prec = dummy_precision<Scalar>()) const
+ bool isApprox(const AngleAxis& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_axis.isApprox(other.m_axis, prec) && ei_isApprox(m_angle,other.m_angle, prec); }
};
@@ -165,7 +165,7 @@ template<typename QuatDerived>
AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived>& q)
{
Scalar n2 = q.vec().squaredNorm();
- if (n2 < dummy_precision<Scalar>()*dummy_precision<Scalar>())
+ if (n2 < NumTraits<Scalar>::dummy_precision()*NumTraits<Scalar>::dummy_precision())
{
m_angle = 0;
m_axis << 1, 0, 0;
@@ -189,6 +189,16 @@ AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const MatrixBase<Derived>& mat)
return *this = QuaternionType(mat);
}
+/**
+* \brief Sets \c *this from a 3x3 rotation matrix.
+**/
+template<typename Scalar>
+template<typename Derived>
+AngleAxis<Scalar>& AngleAxis<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
+{
+ return *this = QuaternionType(mat);
+}
+
/** Constructs and \returns an equivalent 3x3 rotation matrix.
*/
template<typename Scalar>
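
The new fromRotationMatrix() member mirrors the existing operator= from a matrix expression; a brief sketch (assuming <Eigen/Geometry>):

Eigen::Matrix3f R = Eigen::AngleAxisf(0.5f, Eigen::Vector3f::UnitZ()).toRotationMatrix();
Eigen::AngleAxisf aa;
aa.fromRotationMatrix(R);   // member added above; equivalent to aa = R
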
diff --git a/Eigen/src/Geometry/EulerAngles.h b/Eigen/src/Geometry/EulerAngles.h
index b6dbf8ae9..13d23761a 100644
--- a/Eigen/src/Geometry/EulerAngles.h
+++ b/Eigen/src/Geometry/EulerAngles.h
@@ -50,7 +50,7 @@ MatrixBase<Derived>::eulerAngles(int a0, int a1, int a2) const
Matrix<Scalar,3,1> res;
typedef Matrix<typename Derived::Scalar,2,1> Vector2;
- const Scalar epsilon = dummy_precision<Scalar>();
+ const Scalar epsilon = NumTraits<Scalar>::dummy_precision();
const int odd = ((a0+1)%3 == a1) ? 0 : 1;
const int i = a0;
diff --git a/Eigen/src/Geometry/Homogeneous.h b/Eigen/src/Geometry/Homogeneous.h
index b0232e77c..76ca66c57 100644
--- a/Eigen/src/Geometry/Homogeneous.h
+++ b/Eigen/src/Geometry/Homogeneous.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -70,7 +70,8 @@ template<typename MatrixType,int _Direction> class Homogeneous
enum { Direction = _Direction };
- EIGEN_GENERIC_PUBLIC_INTERFACE(Homogeneous)
+ typedef MatrixBase<Homogeneous> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Homogeneous)
inline Homogeneous(const MatrixType& matrix)
: m_matrix(matrix)
diff --git a/Eigen/src/Geometry/Hyperplane.h b/Eigen/src/Geometry/Hyperplane.h
index aab3d5b35..1d0b299ba 100644
--- a/Eigen/src/Geometry/Hyperplane.h
+++ b/Eigen/src/Geometry/Hyperplane.h
@@ -257,7 +257,7 @@ public:
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
- bool isApprox(const Hyperplane& other, typename NumTraits<Scalar>::Real prec = dummy_precision<Scalar>()) const
+ bool isApprox(const Hyperplane& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_coeffs.isApprox(other.m_coeffs, prec); }
protected:
diff --git a/Eigen/src/Geometry/ParametrizedLine.h b/Eigen/src/Geometry/ParametrizedLine.h
index 21a5595b9..1846a440a 100644
--- a/Eigen/src/Geometry/ParametrizedLine.h
+++ b/Eigen/src/Geometry/ParametrizedLine.h
@@ -123,7 +123,7 @@ public:
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
- bool isApprox(const ParametrizedLine& other, typename NumTraits<Scalar>::Real prec = dummy_precision<Scalar>()) const
+ bool isApprox(const ParametrizedLine& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); }
protected:
diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h
index b8b41ebcf..67319d15b 100644
--- a/Eigen/src/Geometry/Quaternion.h
+++ b/Eigen/src/Geometry/Quaternion.h
@@ -163,7 +163,7 @@ public:
*
* \sa MatrixBase::isApprox() */
template<class OtherDerived>
- bool isApprox(const QuaternionBase<OtherDerived>& other, RealScalar prec = dummy_precision<Scalar>()) const
+ bool isApprox(const QuaternionBase<OtherDerived>& other, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
{ return coeffs().isApprox(other.coeffs(), prec); }
/** return the result vector of \a v through the rotation*/
@@ -377,7 +377,8 @@ template <class Derived>
template <class OtherDerived>
EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator*= (const QuaternionBase<OtherDerived>& other)
{
- return (derived() = derived() * other.derived());
+ derived() = derived() * other.derived();
+ return derived();
}
/** Rotation of a vector by a quaternion.
@@ -507,7 +508,7 @@ inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Deri
// under the constraint:
// ||x|| = 1
// which yields a singular value problem
- if (c < Scalar(-1)+dummy_precision<Scalar>())
+ if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision())
{
c = std::max<Scalar>(c,-1);
Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
@@ -572,7 +573,7 @@ QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& oth
double d = ei_abs(this->dot(other));
if (d>=1.0)
return Scalar(0);
- return Scalar(2) * std::acos(d);
+ return static_cast<Scalar>(2 * std::acos(d));
}
/** \returns the spherical linear interpolation between the two quaternions
@@ -583,7 +584,7 @@ template <class OtherDerived>
Quaternion<typename ei_traits<Derived>::Scalar>
QuaternionBase<Derived>::slerp(Scalar t, const QuaternionBase<OtherDerived>& other) const
{
- static const Scalar one = Scalar(1) - epsilon<Scalar>();
+ static const Scalar one = Scalar(1) - NumTraits<Scalar>::epsilon();
Scalar d = this->dot(other);
Scalar absD = ei_abs(d);
diff --git a/Eigen/src/Geometry/Rotation2D.h b/Eigen/src/Geometry/Rotation2D.h
index 7f24a3eae..73f731f76 100644
--- a/Eigen/src/Geometry/Rotation2D.h
+++ b/Eigen/src/Geometry/Rotation2D.h
@@ -121,7 +121,7 @@ public:
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
- bool isApprox(const Rotation2D& other, typename NumTraits<Scalar>::Real prec = dummy_precision<Scalar>()) const
+ bool isApprox(const Rotation2D& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
{ return ei_isApprox(m_angle,other.m_angle, prec); }
};
diff --git a/Eigen/src/Geometry/Scaling.h b/Eigen/src/Geometry/Scaling.h
index 4695914fd..2ff8eaba3 100644
--- a/Eigen/src/Geometry/Scaling.h
+++ b/Eigen/src/Geometry/Scaling.h
@@ -107,7 +107,7 @@ public:
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
- bool isApprox(const UniformScaling& other, typename NumTraits<Scalar>::Real prec = dummy_precision<Scalar>()) const
+ bool isApprox(const UniformScaling& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
{ return ei_isApprox(m_factor, other.factor(), prec); }
};
diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h
index b945ea43f..40bc7033a 100644
--- a/Eigen/src/Geometry/Transform.h
+++ b/Eigen/src/Geometry/Transform.h
@@ -345,9 +345,14 @@ public:
/** \sa MatrixBase::setIdentity() */
void setIdentity() { m_matrix.setIdentity(); }
- static const typename MatrixType::IdentityReturnType Identity()
+
+ /**
+ * \brief Returns an identity transformation.
+ * \todo In the future this function should be returning a Transform expression.
+ */
+ static const Transform Identity()
{
- return MatrixType::Identity();
+ return Transform(MatrixType::Identity());
}
template<typename OtherDerived>
@@ -424,7 +429,7 @@ public:
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
- bool isApprox(const Transform& other, typename NumTraits<Scalar>::Real prec = dummy_precision<Scalar>()) const
+ bool isApprox(const Transform& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_matrix.isApprox(other.m_matrix, prec); }
/** Sets the last row to [0 ... 0 1]
diff --git a/Eigen/src/Geometry/Translation.h b/Eigen/src/Geometry/Translation.h
index b7477df9f..1e3906bde 100644
--- a/Eigen/src/Geometry/Translation.h
+++ b/Eigen/src/Geometry/Translation.h
@@ -154,7 +154,7 @@ public:
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
- bool isApprox(const Translation& other, typename NumTraits<Scalar>::Real prec = dummy_precision<Scalar>()) const
+ bool isApprox(const Translation& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_coeffs.isApprox(other.m_coeffs, prec); }
};
diff --git a/Eigen/src/Geometry/Umeyama.h b/Eigen/src/Geometry/Umeyama.h
index 5be098d77..c5d99d533 100644
--- a/Eigen/src/Geometry/Umeyama.h
+++ b/Eigen/src/Geometry/Umeyama.h
@@ -45,10 +45,10 @@ namespace
struct ei_umeyama_transform_matrix_type
{
enum {
- MinRowsAtCompileTime = EIGEN_ENUM_MIN(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime),
+ MinRowsAtCompileTime = EIGEN_SIZE_MIN(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime),
// When possible we want to choose some small fixed size value since the result
- // is likely to fit on the stack.
+ // is likely to fit on the stack. Here EIGEN_ENUM_MIN is really what we want.
HomogeneousDimension = EIGEN_ENUM_MIN(MinRowsAtCompileTime+1, Dynamic)
};
@@ -114,7 +114,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
EIGEN_STATIC_ASSERT((ei_is_same_type<Scalar, typename ei_traits<OtherDerived>::Scalar>::ret),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
- enum { Dimension = EIGEN_ENUM_MIN(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) };
+ enum { Dimension = EIGEN_SIZE_MIN(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) };
typedef Matrix<Scalar, Dimension, 1> VectorType;
typedef Matrix<Scalar, Dimension, Dimension> MatrixType;
diff --git a/Eigen/src/Householder/Householder.h b/Eigen/src/Householder/Householder.h
index cfd3935fc..d86e287fa 100644
--- a/Eigen/src/Householder/Householder.h
+++ b/Eigen/src/Householder/Householder.h
@@ -45,7 +45,7 @@ void makeTrivialHouseholder(
template<typename Derived>
void MatrixBase<Derived>::makeHouseholderInPlace(Scalar& tau, RealScalar& beta)
{
- VectorBlock<Derived, ei_decrement_size<SizeAtCompileTime>::ret> essentialPart(derived(), 1, size()-1);
+ VectorBlock<Derived, ei_decrement_size<Base::SizeAtCompileTime>::ret> essentialPart(derived(), 1, size()-1);
makeHouseholder(essentialPart, tau, beta);
}
@@ -99,7 +99,7 @@ void MatrixBase<Derived>::applyHouseholderOnTheLeft(
const Scalar& tau,
Scalar* workspace)
{
- Map<Matrix<Scalar, 1, ColsAtCompileTime, PlainMatrixType::Options, 1, MaxColsAtCompileTime> > tmp(workspace,cols());
+ Map<Matrix<Scalar, 1, Base::ColsAtCompileTime, PlainMatrixType::Options, 1, Base::MaxColsAtCompileTime> > tmp(workspace,cols());
Block<Derived, EssentialPart::SizeAtCompileTime, Derived::ColsAtCompileTime> bottom(derived(), 1, 0, rows()-1, cols());
tmp.noalias() = essential.adjoint() * bottom;
tmp += this->row(0);
@@ -114,7 +114,7 @@ void MatrixBase<Derived>::applyHouseholderOnTheRight(
const Scalar& tau,
Scalar* workspace)
{
- Map<Matrix<Scalar, RowsAtCompileTime, 1, PlainMatrixType::Options, MaxRowsAtCompileTime, 1> > tmp(workspace,rows());
+ Map<Matrix<Scalar, Base::RowsAtCompileTime, 1, PlainMatrixType::Options, Base::MaxRowsAtCompileTime, 1> > tmp(workspace,rows());
Block<Derived, Derived::RowsAtCompileTime, EssentialPart::SizeAtCompileTime> right(derived(), 0, 1, rows(), cols()-1);
tmp.noalias() = right * essential.conjugate();
tmp += this->col(0);
diff --git a/Eigen/src/LU/Determinant.h b/Eigen/src/LU/Determinant.h
index bae01256e..fb6577f08 100644
--- a/Eigen/src/LU/Determinant.h
+++ b/Eigen/src/LU/Determinant.h
@@ -99,7 +99,7 @@ template<typename Derived>
inline typename ei_traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
{
assert(rows() == cols());
- typedef typename ei_nested<Derived,RowsAtCompileTime>::type Nested;
+ typedef typename ei_nested<Derived,Base::RowsAtCompileTime>::type Nested;
Nested nested(derived());
return ei_determinant_impl<typename ei_cleantype<Nested>::type>::run(nested);
}
diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h
index 148ddcd23..72e878223 100644
--- a/Eigen/src/LU/FullPivLU.h
+++ b/Eigen/src/LU/FullPivLU.h
@@ -276,7 +276,7 @@ template<typename _MatrixType> class FullPivLU
return m_usePrescribedThreshold ? m_prescribedThreshold
// this formula comes from experimenting (see "LU precision tuning" thread on the list)
// and turns out to be identical to Higham's formula used already in LDLt.
- : epsilon<Scalar>() * m_lu.diagonalSize();
+ : NumTraits<Scalar>::epsilon() * m_lu.diagonalSize();
}
/** \returns the rank of the matrix of which *this is the LU decomposition.
@@ -476,7 +476,7 @@ typename ei_traits<MatrixType>::Scalar FullPivLU<MatrixType>::determinant() cons
{
ei_assert(m_isInitialized && "LU is not initialized.");
ei_assert(m_lu.rows() == m_lu.cols() && "You can't take the determinant of a non-square matrix!");
- return Scalar(m_det_pq) * m_lu.diagonal().prod();
+ return Scalar(m_det_pq) * Scalar(m_lu.diagonal().prod());
}
/********* Implementation of kernel() **************************************************/
@@ -487,7 +487,7 @@ struct ei_kernel_retval<FullPivLU<_MatrixType> >
{
EIGEN_MAKE_KERNEL_HELPERS(FullPivLU<_MatrixType>)
- enum { MaxSmallDimAtCompileTime = EIGEN_ENUM_MIN(
+ enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN(
MatrixType::MaxColsAtCompileTime,
MatrixType::MaxRowsAtCompileTime)
};
@@ -572,7 +572,7 @@ struct ei_image_retval<FullPivLU<_MatrixType> >
{
EIGEN_MAKE_IMAGE_HELPERS(FullPivLU<_MatrixType>)
- enum { MaxSmallDimAtCompileTime = EIGEN_ENUM_MIN(
+ enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN(
MatrixType::MaxColsAtCompileTime,
MatrixType::MaxRowsAtCompileTime)
};
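
For a sense of scale of the default threshold formula above (epsilon() times the diagonal size), a self-contained sketch assuming double precision, where NumTraits<double>::epsilon() is about 2.2e-16:

#include <Eigen/LU>
#include <iostream>

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(1000, 1000);
  Eigen::FullPivLU<Eigen::MatrixXd> lu(m);
  // default rank-detection threshold ~ 2.2e-16 * 1000 = 2.2e-13
  std::cout << lu.threshold() << "\n";
  std::cout << lu.rank() << "\n";     // 1000 for a generic random matrix
  return 0;
}
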
diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h
index f50aa4535..809e4aad6 100644
--- a/Eigen/src/LU/PartialPivLU.h
+++ b/Eigen/src/LU/PartialPivLU.h
@@ -67,7 +67,7 @@ template<typename _MatrixType> class PartialPivLU
typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> PermutationVectorType;
typedef PermutationMatrix<MatrixType::RowsAtCompileTime> PermutationType;
- enum { MaxSmallDimAtCompileTime = EIGEN_ENUM_MIN(
+ enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN(
MatrixType::MaxColsAtCompileTime,
MatrixType::MaxRowsAtCompileTime)
};
diff --git a/Eigen/src/LU/arch/Inverse_SSE.h b/Eigen/src/LU/arch/Inverse_SSE.h
index 2ad371a7b..9da39bf20 100644
--- a/Eigen/src/LU/arch/Inverse_SSE.h
+++ b/Eigen/src/LU/arch/Inverse_SSE.h
@@ -158,8 +158,8 @@ struct ei_compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultTyp
{
static void run(const MatrixType& matrix, ResultType& result)
{
- const EIGEN_ALIGN16 long long int _Sign_NP[2] = { 0x8000000000000000, 0x0000000000000000 };
- const EIGEN_ALIGN16 long long int _Sign_PN[2] = { 0x0000000000000000, 0x8000000000000000 };
+ const EIGEN_ALIGN16 long long int _Sign_NP[2] = { 0x8000000000000000ll, 0x0000000000000000ll };
+ const EIGEN_ALIGN16 long long int _Sign_PN[2] = { 0x0000000000000000ll, 0x8000000000000000ll };
// The inverse is calculated using "Divide and Conquer" technique. The
// original matrix is divide into four 2x2 sub-matrices. Since each
diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h
index a119d0c2f..bf28605dd 100644
--- a/Eigen/src/QR/ColPivHouseholderQR.h
+++ b/Eigen/src/QR/ColPivHouseholderQR.h
@@ -51,7 +51,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
- DiagSizeAtCompileTime = EIGEN_ENUM_MIN(ColsAtCompileTime,RowsAtCompileTime)
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN(ColsAtCompileTime,RowsAtCompileTime)
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
@@ -282,7 +282,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
return m_usePrescribedThreshold ? m_prescribedThreshold
// this formula comes from experimenting (see "LU precision tuning" thread on the list)
// and turns out to be identical to Higham's formula used already in LDLt.
- : epsilon<Scalar>() * m_qr.diagonalSize();
+ : NumTraits<Scalar>::epsilon() * m_qr.diagonalSize();
}
/** \returns the number of nonzero pivots in the QR decomposition.
@@ -350,7 +350,7 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
for(int k = 0; k < cols; ++k)
colSqNorms.coeffRef(k) = m_qr.col(k).squaredNorm();
- RealScalar threshold_helper = colSqNorms.maxCoeff() * ei_abs2(epsilon<Scalar>()) / rows;
+ RealScalar threshold_helper = colSqNorms.maxCoeff() * ei_abs2(NumTraits<Scalar>::epsilon()) / rows;
m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
m_maxpivot = RealScalar(0);
diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h
index 717ff19f8..1ec60aeaf 100644
--- a/Eigen/src/QR/FullPivHouseholderQR.h
+++ b/Eigen/src/QR/FullPivHouseholderQR.h
@@ -51,7 +51,7 @@ template<typename _MatrixType> class FullPivHouseholderQR
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
- DiagSizeAtCompileTime = EIGEN_ENUM_MIN(ColsAtCompileTime,RowsAtCompileTime)
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN(ColsAtCompileTime,RowsAtCompileTime)
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
@@ -270,7 +270,7 @@ FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(cons
RowVectorType temp(cols);
- m_precision = epsilon<Scalar>() * size;
+ m_precision = NumTraits<Scalar>::epsilon() * size;
m_rows_transpositions.resize(matrix.rows());
IntRowVectorType cols_transpositions(matrix.cols());
@@ -370,7 +370,7 @@ struct ei_solve_retval<FullPivHouseholderQR<_MatrixType>, Rhs>
RealScalar biggest_in_upper_part_of_c = c.corner(TopLeft, dec().rank(), c.cols()).cwiseAbs().maxCoeff();
RealScalar biggest_in_lower_part_of_c = c.corner(BottomLeft, rows-dec().rank(), c.cols()).cwiseAbs().maxCoeff();
// FIXME brain dead
- const RealScalar m_precision = epsilon<Scalar>() * std::min(rows,cols);
+ const RealScalar m_precision = NumTraits<Scalar>::epsilon() * std::min(rows,cols);
if(!ei_isMuchSmallerThan(biggest_in_lower_part_of_c, biggest_in_upper_part_of_c, m_precision))
return;
}
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h
index 55fac3d12..94375725f 100644
--- a/Eigen/src/SVD/JacobiSVD.h
+++ b/Eigen/src/SVD/JacobiSVD.h
@@ -295,7 +295,7 @@ JacobiSVD<MatrixType, Options>& JacobiSVD<MatrixType, Options>::compute(const Ma
int cols = matrix.cols();
int diagSize = std::min(rows, cols);
m_singularValues.resize(diagSize);
- const RealScalar precision = 2 * epsilon<Scalar>();
+ const RealScalar precision = 2 * NumTraits<Scalar>::epsilon();
if(!ei_svd_precondition_if_more_rows_than_cols<MatrixType, Options>::run(matrix, work_matrix, *this)
&& !ei_svd_precondition_if_more_cols_than_rows<MatrixType, Options>::run(matrix, work_matrix, *this))
diff --git a/Eigen/src/SVD/SVD.h b/Eigen/src/SVD/SVD.h
index cd8c11b8d..c308ff3ee 100644
--- a/Eigen/src/SVD/SVD.h
+++ b/Eigen/src/SVD/SVD.h
@@ -52,7 +52,7 @@ template<typename _MatrixType> class SVD
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
PacketSize = ei_packet_traits<Scalar>::size,
AlignmentMask = int(PacketSize)-1,
- MinSize = EIGEN_ENUM_MIN(RowsAtCompileTime, ColsAtCompileTime)
+ MinSize = EIGEN_SIZE_MIN(RowsAtCompileTime, ColsAtCompileTime)
};
typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVector;
@@ -193,7 +193,7 @@ SVD<MatrixType>& SVD<MatrixType>::compute(const MatrixType& matrix)
int i=0,its=0,j=0,k=0,l=0,nm=0;
Scalar anorm, c, f, g, h, s, scale, x, y, z;
bool convergence = true;
- Scalar eps = dummy_precision<Scalar>();
+ Scalar eps = NumTraits<Scalar>::dummy_precision();
Matrix<Scalar,Dynamic,1> rv1(n);
g = scale = anorm = 0;
diff --git a/Eigen/src/Sparse/AmbiVector.h b/Eigen/src/Sparse/AmbiVector.h
index 6bb6ee3e4..1ac28272b 100644
--- a/Eigen/src/Sparse/AmbiVector.h
+++ b/Eigen/src/Sparse/AmbiVector.h
@@ -296,7 +296,7 @@ class AmbiVector<_Scalar>::Iterator
* In practice, all coefficients having a magnitude smaller than \a epsilon
* are skipped.
*/
- Iterator(const AmbiVector& vec, RealScalar epsilon = RealScalar(0.1)*dummy_precision<RealScalar>())
+ Iterator(const AmbiVector& vec, RealScalar epsilon = RealScalar(0.1)*NumTraits<RealScalar>::dummy_precision())
: m_vector(vec)
{
m_epsilon = epsilon;
diff --git a/Eigen/src/Sparse/CompressedStorage.h b/Eigen/src/Sparse/CompressedStorage.h
index b25b05e91..4fc1797d1 100644
--- a/Eigen/src/Sparse/CompressedStorage.h
+++ b/Eigen/src/Sparse/CompressedStorage.h
@@ -185,7 +185,7 @@ class CompressedStorage
return m_values[id];
}
- void prune(Scalar reference, RealScalar epsilon = dummy_precision<RealScalar>())
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
{
size_t k = 0;
size_t n = size();
diff --git a/Eigen/src/Sparse/DynamicSparseMatrix.h b/Eigen/src/Sparse/DynamicSparseMatrix.h
index 4373e1cda..2594ffebc 100644
--- a/Eigen/src/Sparse/DynamicSparseMatrix.h
+++ b/Eigen/src/Sparse/DynamicSparseMatrix.h
@@ -52,19 +52,12 @@ struct ei_traits<DynamicSparseMatrix<_Scalar, _Flags> >
ColsAtCompileTime = Dynamic,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic,
- Flags = _Flags,
+ Flags = _Flags | NestByRefBit,
CoeffReadCost = NumTraits<Scalar>::ReadCost,
SupportedAccessPatterns = OuterRandomAccessPattern
};
};
-template<typename _Scalar, int _Options>
-struct ei_ref_selector< DynamicSparseMatrix<_Scalar, _Options> >
-{
- typedef DynamicSparseMatrix<_Scalar, _Options> MatrixType;
- typedef MatrixType const& type;
-};
-
template<typename _Scalar, int _Flags>
class DynamicSparseMatrix
: public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Flags> >
@@ -216,7 +209,7 @@ class DynamicSparseMatrix
inline void finalize() {}
- void prune(Scalar reference, RealScalar epsilon = dummy_precision<RealScalar>())
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
{
for (int j=0; j<outerSize(); ++j)
m_data[j].prune(reference,epsilon);
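The Flags change above (adding NestByRefBit) is what makes the deleted ei_ref_selector specialization unnecessary; the same pattern repeats below for SparseMatrix and SparseVector. A simplified, self-contained illustration of the idea, not Eigen's actual implementation (the names and the flag value are illustrative only):

// One generic selector inspects the flag instead of requiring a dedicated
// specialization for every sparse class.
enum { NestByRefBit = 0x100 };            // illustrative value only

template<bool Cond, typename Then, typename Else>
struct meta_if                    { typedef Then type; };
template<typename Then, typename Else>
struct meta_if<false, Then, Else> { typedef Else type; };

template<typename XprType, int Flags>
struct ref_selector
{
  // expressions carrying NestByRefBit are nested by const reference,
  // everything else is nested by value
  typedef typename meta_if<(Flags & NestByRefBit) != 0,
                           const XprType&, XprType>::type type;
};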
diff --git a/Eigen/src/Sparse/SparseBlock.h b/Eigen/src/Sparse/SparseBlock.h
index a8c1f9047..2e16856eb 100644
--- a/Eigen/src/Sparse/SparseBlock.h
+++ b/Eigen/src/Sparse/SparseBlock.h
@@ -1,8 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2008 Daniel Gomez Ferro <dgomezferro@gmail.com>
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -406,98 +405,4 @@ template<typename Derived>
const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(int outerStart, int outerSize) const
{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
-# if 0
-template<typename MatrixType, int BlockRows, int BlockCols, int PacketAccess>
-class Block<MatrixType,BlockRows,BlockCols,PacketAccess,IsSparse>
- : public SparseMatrixBase<Block<MatrixType,BlockRows,BlockCols,PacketAccess,IsSparse> >
-{
-public:
-
- _EIGEN_GENERIC_PUBLIC_INTERFACE(Block, SparseMatrixBase<Block>)
- class InnerIterator;
-
- /** Column or Row constructor
- */
- inline Block(const MatrixType& matrix, int i)
- : m_matrix(matrix),
- // It is a row if and only if BlockRows==1 and BlockCols==MatrixType::ColsAtCompileTime,
- // and it is a column if and only if BlockRows==MatrixType::RowsAtCompileTime and BlockCols==1,
- // all other cases are invalid.
- // The case a 1x1 matrix seems ambiguous, but the result is the same anyway.
- m_startRow( (BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) ? i : 0),
- m_startCol( (BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
- m_blockRows(matrix.rows()), // if it is a row, then m_blockRows has a fixed-size of 1, so no pb to try to overwrite it
- m_blockCols(matrix.cols()) // same for m_blockCols
- {
- ei_assert( (i>=0) && (
- ((BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) && i<matrix.rows())
- ||((BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) && i<matrix.cols())));
- }
-
- /** Fixed-size constructor
- */
- inline Block(const MatrixType& matrix, int startRow, int startCol)
- : m_matrix(matrix), m_startRow(startRow), m_startCol(startCol),
- m_blockRows(matrix.rows()), m_blockCols(matrix.cols())
- {
- EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && RowsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
- ei_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= matrix.rows()
- && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= matrix.cols());
- }
-
- /** Dynamic-size constructor
- */
- inline Block(const MatrixType& matrix,
- int startRow, int startCol,
- int blockRows, int blockCols)
- : m_matrix(matrix), m_startRow(startRow), m_startCol(startCol),
- m_blockRows(blockRows), m_blockCols(blockCols)
- {
- ei_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
- && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
- ei_assert(startRow >= 0 && blockRows >= 1 && startRow + blockRows <= matrix.rows()
- && startCol >= 0 && blockCols >= 1 && startCol + blockCols <= matrix.cols());
- }
-
- inline int rows() const { return m_blockRows.value(); }
- inline int cols() const { return m_blockCols.value(); }
-
- inline int stride(void) const { return m_matrix.stride(); }
-
- inline Scalar& coeffRef(int row, int col)
- {
- return m_matrix.const_cast_derived()
- .coeffRef(row + m_startRow.value(), col + m_startCol.value());
- }
-
- inline const Scalar coeff(int row, int col) const
- {
- return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
- }
-
- inline Scalar& coeffRef(int index)
- {
- return m_matrix.const_cast_derived()
- .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
- m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
- }
-
- inline const Scalar coeff(int index) const
- {
- return m_matrix
- .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
- m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
- }
-
- protected:
-
- const typename MatrixType::Nested m_matrix;
- const ei_int_if_dynamic<MatrixType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
- const ei_int_if_dynamic<MatrixType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
- const ei_int_if_dynamic<RowsAtCompileTime> m_blockRows;
- const ei_int_if_dynamic<ColsAtCompileTime> m_blockCols;
-
-};
-#endif
-
#endif // EIGEN_SPARSE_BLOCK_H
diff --git a/Eigen/src/Sparse/SparseCwiseUnaryOp.h b/Eigen/src/Sparse/SparseCwiseUnaryOp.h
index eb2c99375..1d1e22c5c 100644
--- a/Eigen/src/Sparse/SparseCwiseUnaryOp.h
+++ b/Eigen/src/Sparse/SparseCwiseUnaryOp.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -60,7 +60,7 @@ class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::InnerIterator
public:
EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, int outer)
- : m_iter(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived()._functor())
+ : m_iter(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
@@ -101,7 +101,7 @@ class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::InnerIterator
public:
EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryView, int outer)
- : m_iter(unaryView.derived().nestedExpression(),outer), m_functor(unaryView.derived()._functor())
+ : m_iter(unaryView.derived().nestedExpression(),outer), m_functor(unaryView.derived().functor())
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
diff --git a/Eigen/src/Sparse/SparseExpressionMaker.h b/Eigen/src/Sparse/SparseExpressionMaker.h
deleted file mode 100644
index 8e31d55ef..000000000
--- a/Eigen/src/Sparse/SparseExpressionMaker.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SPARSE_EXPRESSIONMAKER_H
-#define EIGEN_SPARSE_EXPRESSIONMAKER_H
-
-template<typename Func, typename XprType>
-struct MakeCwiseUnaryOp<Func,XprType,IsSparse>
-{
- typedef SparseCwiseUnaryOp<Func,XprType> Type;
-};
-
-template<typename Func, typename A, typename B>
-struct MakeCwiseBinaryOp<Func,A,B,IsSparse>
-{
- typedef SparseCwiseBinaryOp<Func,A,B> Type;
-};
-
-// TODO complete the list
-
-#endif // EIGEN_SPARSE_EXPRESSIONMAKER_H
diff --git a/Eigen/src/Sparse/SparseLDLT.h b/Eigen/src/Sparse/SparseLDLT.h
index 7dfcf329c..28797a6c4 100644
--- a/Eigen/src/Sparse/SparseLDLT.h
+++ b/Eigen/src/Sparse/SparseLDLT.h
@@ -94,7 +94,7 @@ class SparseLDLT
: m_flags(flags), m_status(0)
{
ei_assert((MatrixType::Flags&RowMajorBit)==0);
- m_precision = RealScalar(0.1) * Eigen::dummy_precision<RealScalar>();
+ m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
}
/** Creates a LDLT object and compute the respective factorization of \a matrix using
@@ -103,7 +103,7 @@ class SparseLDLT
: m_matrix(matrix.rows(), matrix.cols()), m_flags(flags), m_status(0)
{
ei_assert((MatrixType::Flags&RowMajorBit)==0);
- m_precision = RealScalar(0.1) * Eigen::dummy_precision<RealScalar>();
+ m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
compute(matrix);
}
diff --git a/Eigen/src/Sparse/SparseLLT.h b/Eigen/src/Sparse/SparseLLT.h
index 9018fe0df..a1c10ba13 100644
--- a/Eigen/src/Sparse/SparseLLT.h
+++ b/Eigen/src/Sparse/SparseLLT.h
@@ -54,7 +54,7 @@ class SparseLLT
SparseLLT(int flags = 0)
: m_flags(flags), m_status(0)
{
- m_precision = RealScalar(0.1) * Eigen::dummy_precision<RealScalar>();
+ m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
}
/** Creates a LLT object and compute the respective factorization of \a matrix using
@@ -62,7 +62,7 @@ class SparseLLT
SparseLLT(const MatrixType& matrix, int flags = 0)
: m_matrix(matrix.rows(), matrix.cols()), m_flags(flags), m_status(0)
{
- m_precision = RealScalar(0.1) * Eigen::dummy_precision<RealScalar>();
+ m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
compute(matrix);
}
diff --git a/Eigen/src/Sparse/SparseLU.h b/Eigen/src/Sparse/SparseLU.h
index 0802b0807..3fe9df1ef 100644
--- a/Eigen/src/Sparse/SparseLU.h
+++ b/Eigen/src/Sparse/SparseLU.h
@@ -59,7 +59,7 @@ class SparseLU
SparseLU(int flags = 0)
: m_flags(flags), m_status(0)
{
- m_precision = RealScalar(0.1) * Eigen::dummy_precision<RealScalar>();
+ m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
}
/** Creates a LU object and compute the respective factorization of \a matrix using
@@ -67,7 +67,7 @@ class SparseLU
SparseLU(const MatrixType& matrix, int flags = 0)
: /*m_matrix(matrix.rows(), matrix.cols()),*/ m_flags(flags), m_status(0)
{
- m_precision = RealScalar(0.1) * Eigen::dummy_precision<RealScalar>();
+ m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
compute(matrix);
}
diff --git a/Eigen/src/Sparse/SparseMatrix.h b/Eigen/src/Sparse/SparseMatrix.h
index d884e7df8..68385f564 100644
--- a/Eigen/src/Sparse/SparseMatrix.h
+++ b/Eigen/src/Sparse/SparseMatrix.h
@@ -51,20 +51,13 @@ struct ei_traits<SparseMatrix<_Scalar, _Options> >
ColsAtCompileTime = Dynamic,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic,
- Flags = _Options,
+ Flags = _Options | NestByRefBit,
CoeffReadCost = NumTraits<Scalar>::ReadCost,
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
template<typename _Scalar, int _Options>
-struct ei_ref_selector<SparseMatrix<_Scalar, _Options> >
-{
- typedef SparseMatrix<_Scalar, _Options> MatrixType;
- typedef MatrixType const& type;
-};
-
-template<typename _Scalar, int _Options>
class SparseMatrix
: public SparseMatrixBase<SparseMatrix<_Scalar, _Options> >
{
@@ -357,7 +350,7 @@ class SparseMatrix
}
}
- void prune(Scalar reference, RealScalar epsilon = dummy_precision<RealScalar>())
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
{
int k = 0;
for (int j=0; j<m_outerSize; ++j)
diff --git a/Eigen/src/Sparse/SparseMatrixBase.h b/Eigen/src/Sparse/SparseMatrixBase.h
index 9e426c2b9..205325939 100644
--- a/Eigen/src/Sparse/SparseMatrixBase.h
+++ b/Eigen/src/Sparse/SparseMatrixBase.h
@@ -513,32 +513,32 @@ template<typename Derived> class SparseMatrixBase : public AnyMatrixBase<Derived
template<typename OtherDerived>
bool isApprox(const SparseMatrixBase<OtherDerived>& other,
- RealScalar prec = dummy_precision<Scalar>()) const
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
{ return toDense().isApprox(other.toDense(),prec); }
template<typename OtherDerived>
bool isApprox(const MatrixBase<OtherDerived>& other,
- RealScalar prec = dummy_precision<Scalar>()) const
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
{ return toDense().isApprox(other,prec); }
// bool isMuchSmallerThan(const RealScalar& other,
-// RealScalar prec = dummy_precision<Scalar>()) const;
+// RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
// template<typename OtherDerived>
// bool isMuchSmallerThan(const MatrixBase<OtherDerived>& other,
-// RealScalar prec = dummy_precision<Scalar>()) const;
+// RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
-// bool isApproxToConstant(const Scalar& value, RealScalar prec = dummy_precision<Scalar>()) const;
-// bool isZero(RealScalar prec = dummy_precision<Scalar>()) const;
-// bool isOnes(RealScalar prec = dummy_precision<Scalar>()) const;
-// bool isIdentity(RealScalar prec = dummy_precision<Scalar>()) const;
-// bool isDiagonal(RealScalar prec = dummy_precision<Scalar>()) const;
+// bool isApproxToConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isZero(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isOnes(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isIdentity(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isDiagonal(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
-// bool isUpper(RealScalar prec = dummy_precision<Scalar>()) const;
-// bool isLower(RealScalar prec = dummy_precision<Scalar>()) const;
+// bool isUpper(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isLower(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
// template<typename OtherDerived>
// bool isOrthogonal(const MatrixBase<OtherDerived>& other,
-// RealScalar prec = dummy_precision<Scalar>()) const;
-// bool isUnitary(RealScalar prec = dummy_precision<Scalar>()) const;
+// RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isUnitary(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
// template<typename OtherDerived>
// inline bool operator==(const MatrixBase<OtherDerived>& other) const
diff --git a/Eigen/src/Sparse/SparseProduct.h b/Eigen/src/Sparse/SparseProduct.h
index 7aa81312e..a56bc7601 100644
--- a/Eigen/src/Sparse/SparseProduct.h
+++ b/Eigen/src/Sparse/SparseProduct.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -93,7 +93,8 @@ class SparseProduct : ei_no_assignment_operator,
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(SparseProduct)
+ typedef typename ei_traits<SparseProduct<LhsNested, RhsNested> >::Base Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(SparseProduct)
private:
diff --git a/Eigen/src/Sparse/SparseVector.h b/Eigen/src/Sparse/SparseVector.h
index 5ac3f6be7..58f3ec342 100644
--- a/Eigen/src/Sparse/SparseVector.h
+++ b/Eigen/src/Sparse/SparseVector.h
@@ -46,20 +46,13 @@ struct ei_traits<SparseVector<_Scalar, _Options> >
ColsAtCompileTime = IsColVector ? 1 : Dynamic,
MaxRowsAtCompileTime = RowsAtCompileTime,
MaxColsAtCompileTime = ColsAtCompileTime,
- Flags = _Options,
+ Flags = _Options | NestByRefBit,
CoeffReadCost = NumTraits<Scalar>::ReadCost,
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
template<typename _Scalar, int _Options>
-struct ei_ref_selector< SparseVector<_Scalar, _Options> >
-{
- typedef SparseVector<_Scalar, _Options> MatrixType;
- typedef MatrixType const& type;
-};
-
-template<typename _Scalar, int _Options>
class SparseVector
: public SparseMatrixBase<SparseVector<_Scalar, _Options> >
{
@@ -209,7 +202,7 @@ class SparseVector
EIGEN_DEPRECATED void endFill() {}
inline void finalize() {}
- void prune(Scalar reference, RealScalar epsilon = dummy_precision<RealScalar>())
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
{
m_data.prune(reference,epsilon);
}
diff --git a/Eigen/src/plugins/MatrixCwiseUnaryOps.h b/Eigen/src/plugins/MatrixCwiseUnaryOps.h
index d75e229fb..8927711ed 100644
--- a/Eigen/src/plugins/MatrixCwiseUnaryOps.h
+++ b/Eigen/src/plugins/MatrixCwiseUnaryOps.h
@@ -75,7 +75,7 @@ cwiseInverse() const { return derived(); }
* \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
*/
inline const CwiseUnaryOp<std::binder1st<std::equal_to<Scalar> >,Derived>
-cwiseEqual(Scalar s) const
+cwiseEqual(const Scalar& s) const
{
return CwiseUnaryOp<std::binder1st<std::equal_to<Scalar> >,Derived>
(derived(), std::bind1st(std::equal_to<Scalar>(), s));
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 39db69345..a5ab10711 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -123,4 +123,18 @@ void eiToCSparse(const EigenSparseMatrix& src, cs* &dst)
#include <boost/numeric/ublas/matrix_sparse.hpp>
#include <boost/numeric/ublas/vector_of_vector.hpp>
+// using namespace boost;
+// using namespace boost::numeric;
+// using namespace boost::numeric::ublas;
+
+typedef boost::numeric::ublas::compressed_matrix<Scalar,boost::numeric::ublas::column_major> UblasMatrix;
+
+void eiToUblas(const EigenSparseMatrix& src, UblasMatrix& dst)
+{
+ for (int j=0; j<src.cols(); ++j)
+ for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it)
+ dst(it.index(),j) = it.value();
+}
+
+
#endif
diff --git a/bench/sparse_product.cpp b/bench/sparse_product.cpp
index 0c72993cd..0b5558b89 100644
--- a/bench/sparse_product.cpp
+++ b/bench/sparse_product.cpp
@@ -3,12 +3,15 @@
//g++ -O3 -g0 -DNDEBUG sparse_product.cpp -I.. -I/home/gael/Coding/LinearAlgebra/mtl4/ -DDENSITY=0.05 -DSIZE=2000 && ./a.out
// -DNOGMM -DNOMTL -DCSPARSE
// -I /home/gael/Coding/LinearAlgebra/CSparse/Include/ /home/gael/Coding/LinearAlgebra/CSparse/Lib/libcsparse.a
+
+#include <typeinfo>
+
#ifndef SIZE
-#define SIZE 10000
+#define SIZE 1000000
#endif
#ifndef NNZPERCOL
-#define NNZPERCOL 32
+#define NNZPERCOL 6
#endif
#ifndef REPEAT
@@ -20,7 +23,7 @@
#include "BenchSparseUtil.h"
#ifndef NBTRIES
-#define NBTRIES 4
+#define NBTRIES 1
#endif
#define BENCH(X) \
@@ -60,6 +63,7 @@
cs* cs_sorted_multiply(const cs* a, const cs* b)
{
// return cs_multiply(a,b);
+
cs* A = cs_transpose(a, 1);
cs* B = cs_transpose(b, 1);
cs* D = cs_multiply(B,A); /* D = B'*A' */
@@ -155,26 +159,26 @@ int main(int argc, char *argv[])
// BENCH(sm3 = sm1.transpose() * sm2; )
// std::cout << " a' * b:\t" << timer.value() << endl;
-//
+// //
// BENCH(sm3 = sm1.transpose() * sm2.transpose(); )
// std::cout << " a' * b':\t" << timer.value() << endl;
-//
+// //
// BENCH(sm3 = sm1 * sm2.transpose(); )
// std::cout << " a * b' :\t" << timer.value() << endl;
-// std::cout << "\n\n";
-
- BENCH( sm3.setprod(sm1, sm2); )
- std::cout << " a * b:\t" << timer.value() << endl;
-
-// BENCH(sm3.setprod(sm1.transpose(),sm2); )
-// std::cout << " a' * b:\t" << timer.value() << endl;
+// std::cout << "\n";
//
-// BENCH(sm3.setprod(sm1.transpose(),sm2.transpose()); )
-// std::cout << " a' * b':\t" << timer.value() << endl;
+// BENCH( sm3._experimentalNewProduct(sm1, sm2); )
+// std::cout << " a * b:\t" << timer.value() << endl;
//
-// BENCH(sm3.setprod(sm1, sm2.transpose());)
+// BENCH(sm3._experimentalNewProduct(sm1.transpose(),sm2); )
+// std::cout << " a' * b:\t" << timer.value() << endl;
+// //
+// BENCH(sm3._experimentalNewProduct(sm1.transpose(),sm2.transpose()); )
+// std::cout << " a' * b':\t" << timer.value() << endl;
+// //
+// BENCH(sm3._experimentalNewProduct(sm1, sm2.transpose());)
// std::cout << " a * b' :\t" << timer.value() << endl;
}
@@ -247,6 +251,23 @@ int main(int argc, char *argv[])
}
#endif
+ #ifndef NOUBLAS
+ {
+ std::cout << "ublas\t" << nnzPerCol << "%\n";
+ UblasMatrix m1(rows,cols), m2(rows,cols), m3(rows,cols);
+ eiToUblas(sm1, m1);
+ eiToUblas(sm2, m2);
+
+ BENCH(boost::numeric::ublas::prod(m1, m2, m3););
+// timer.reset();
+// timer.start();
+// for (int k=0; k<REPEAT; ++k)
+// gmm::mult(m1, m2, gmmT3);
+// timer.stop();
+ std::cout << " a * b:\t" << timer.value() << endl;
+ }
+ #endif
+
// GMM++
#ifndef NOGMM
{
@@ -263,34 +284,34 @@ int main(int argc, char *argv[])
timer.stop();
std::cout << " a * b:\t" << timer.value() << endl;
- timer.reset();
- timer.start();
- for (int k=0; k<REPEAT; ++k)
- gmm::mult(gmm::transposed(m1), m2, gmmT3);
- timer.stop();
- std::cout << " a' * b:\t" << timer.value() << endl;
-
- if (rows<500)
- {
- timer.reset();
- timer.start();
- for (int k=0; k<REPEAT; ++k)
- gmm::mult(gmm::transposed(m1), gmm::transposed(m2), gmmT3);
- timer.stop();
- std::cout << " a' * b':\t" << timer.value() << endl;
-
- timer.reset();
- timer.start();
- for (int k=0; k<REPEAT; ++k)
- gmm::mult(m1, gmm::transposed(m2), gmmT3);
- timer.stop();
- std::cout << " a * b':\t" << timer.value() << endl;
- }
- else
- {
- std::cout << " a' * b':\t" << "forever" << endl;
- std::cout << " a * b':\t" << "forever" << endl;
- }
+// timer.reset();
+// timer.start();
+// for (int k=0; k<REPEAT; ++k)
+// gmm::mult(gmm::transposed(m1), m2, gmmT3);
+// timer.stop();
+// std::cout << " a' * b:\t" << timer.value() << endl;
+//
+// if (rows<500)
+// {
+// timer.reset();
+// timer.start();
+// for (int k=0; k<REPEAT; ++k)
+// gmm::mult(gmm::transposed(m1), gmm::transposed(m2), gmmT3);
+// timer.stop();
+// std::cout << " a' * b':\t" << timer.value() << endl;
+//
+// timer.reset();
+// timer.start();
+// for (int k=0; k<REPEAT; ++k)
+// gmm::mult(m1, gmm::transposed(m2), gmmT3);
+// timer.stop();
+// std::cout << " a * b':\t" << timer.value() << endl;
+// }
+// else
+// {
+// std::cout << " a' * b':\t" << "forever" << endl;
+// std::cout << " a * b':\t" << "forever" << endl;
+// }
}
#endif
@@ -309,26 +330,26 @@ int main(int argc, char *argv[])
timer.stop();
std::cout << " a * b:\t" << timer.value() << endl;
- timer.reset();
- timer.start();
- for (int k=0; k<REPEAT; ++k)
- m3 = trans(m1) * m2;
- timer.stop();
- std::cout << " a' * b:\t" << timer.value() << endl;
-
- timer.reset();
- timer.start();
- for (int k=0; k<REPEAT; ++k)
- m3 = trans(m1) * trans(m2);
- timer.stop();
- std::cout << " a' * b':\t" << timer.value() << endl;
-
- timer.reset();
- timer.start();
- for (int k=0; k<REPEAT; ++k)
- m3 = m1 * trans(m2);
- timer.stop();
- std::cout << " a * b' :\t" << timer.value() << endl;
+// timer.reset();
+// timer.start();
+// for (int k=0; k<REPEAT; ++k)
+// m3 = trans(m1) * m2;
+// timer.stop();
+// std::cout << " a' * b:\t" << timer.value() << endl;
+//
+// timer.reset();
+// timer.start();
+// for (int k=0; k<REPEAT; ++k)
+// m3 = trans(m1) * trans(m2);
+// timer.stop();
+// std::cout << " a' * b':\t" << timer.value() << endl;
+//
+// timer.reset();
+// timer.start();
+// for (int k=0; k<REPEAT; ++k)
+// m3 = m1 * trans(m2);
+// timer.stop();
+// std::cout << " a * b' :\t" << timer.value() << endl;
}
#endif
diff --git a/cmake/EigenTesting.cmake b/cmake/EigenTesting.cmake
index b8cd63ac1..c445f842b 100644
--- a/cmake/EigenTesting.cmake
+++ b/cmake/EigenTesting.cmake
@@ -148,6 +148,12 @@ macro(ei_testing_print_summary)
message("Enabled backends: ${EIGEN_TESTED_BACKENDS}")
message("Disabled backends: ${EIGEN_MISSING_BACKENDS}")
+ if(EIGEN_DEFAULT_TO_ROW_MAJOR)
+ message("Default order: Row-major")
+ else()
+ message("Default order: Column-major")
+ endif()
+
if(EIGEN_TEST_SSE2)
message("SSE2: ON")
else()
@@ -219,6 +225,7 @@ if(CMAKE_COMPILER_IS_GNUCXX)
option(EIGEN_COVERAGE_TESTING "Enable/disable gcov" OFF)
if(EIGEN_COVERAGE_TESTING)
set(COVERAGE_FLAGS "-fprofile-arcs -ftest-coverage")
+ set(CTEST_CUSTOM_COVERAGE_EXCLUDE "/test/")
else(EIGEN_COVERAGE_TESTING)
set(COVERAGE_FLAGS "")
endif(EIGEN_COVERAGE_TESTING)
@@ -226,7 +233,12 @@ if(CMAKE_COMPILER_IS_GNUCXX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++0x")
endif(EIGEN_TEST_C++0x)
if(EIGEN_TEST_MAX_WARNING_LEVEL)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wconversion")
+ CHECK_CXX_COMPILER_FLAG("-Wno-variadic-macros" FLAG_VARIADIC)
+ if(FLAG_VARIADIC)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wconversion -Wno-variadic-macros")
+ else(FLAG_VARIADIC)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wconversion")
+ endif(FLAG_VARIADIC)
endif(EIGEN_TEST_MAX_WARNING_LEVEL)
if(CMAKE_SYSTEM_NAME MATCHES Linux)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_FLAGS} -g2")
diff --git a/doc/A05_PortingFrom2To3.dox b/doc/A05_PortingFrom2To3.dox
index 9c157b016..57f801f84 100644
--- a/doc/A05_PortingFrom2To3.dox
+++ b/doc/A05_PortingFrom2To3.dox
@@ -81,9 +81,9 @@ With Eigen2 you would have written:
c = (a.cwise().abs().cwise().pow(3)).cwise() * (b.cwise().abs().cwise().sin());
\endcode
-\section LazyVsNoalias Lazy evaluation versus noalias
+\section LazyVsNoalias Lazy evaluation and noalias
-In Eigen all operations are performed in a lazy fashion except the matrix products which are always evaluated to a temporary by default.
+In Eigen all operations are performed in a lazy fashion except the matrix products which are always evaluated into a temporary by default.
In Eigen2, lazy evaluation could be enforced by tagging a product using the .lazy() function. However, in complex expressions it was not
easy to determine where to put the lazy() function. In Eigen3, the lazy() feature has been superseded by the MatrixBase::noalias() function
which can be used on the left hand side of an assignment when no aliasing can occur. Here is an example:
@@ -92,6 +92,10 @@ MatrixXf a, b, c;
...
c.noalias() += 2 * a.transpose() * b;
\endcode
+However, the noalias mechanism does not cover all the features of the old .lazy(). Indeed, in some extremely rare cases,
+it might be useful to explicitly request a lazy product, i.e., a product which will be evaluated one coefficient at a time, on demand,
+just like any other expression. To this end you can use the MatrixBase::lazyProduct() function; however, we strongly discourage you from
+using it unless you are sure of what you are doing, i.e., you have rigorously measured a speed improvement.
*/
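For concreteness, a minimal sketch contrasting the three spellings discussed in this documentation change; the matrices and sizes are arbitrary and the snippet is not part of the .dox page itself:

#include <Eigen/Dense>
using namespace Eigen;

int main()
{
  MatrixXf a = MatrixXf::Random(50,50), b = MatrixXf::Random(50,50), c(50,50);

  c = a * b;             // default: the product is evaluated into a temporary, then assigned
  c.noalias() = a * b;   // noalias(): skip the temporary when c does not alias a or b
  c = a.lazyProduct(b);  // lazyProduct(): coefficient-wise, on-demand evaluation (rarely worthwhile)

  return 0;
}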
diff --git a/doc/D01_StlContainers.dox b/doc/D01_StlContainers.dox
index db682c996..83e930e50 100644
--- a/doc/D01_StlContainers.dox
+++ b/doc/D01_StlContainers.dox
@@ -41,6 +41,21 @@ Here is an example:
std::vector<Eigen::Vector4f,Eigen::aligned_allocator<Eigen::Vector4f> >
\endcode
+\subsection vector_spec An alternative - specializing std::vector for Eigen types
+
+As an alternative to the recommended approach described above, you have the option to specialize std::vector for Eigen types requiring alignment.
+The advantage is that you won't need to declare std::vector all over with Eigen::aligned_allocator. One drawback, on the other hand, is that
+the specialization needs to be defined before any code in which e.g. std::vector<Vector2d> is used. Otherwise, without knowing the specialization,
+the compiler will compile that particular instance with the default std::allocator and your program is most likely to crash.
+
+Here is an example:
+\code
+#include<Eigen/StdVector>
+\/* ... *\/
+EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Matrix2d)
+std::vector<Eigen::Vector2d>
+\endcode
+
<span class="note">\b Explanation: The resize() method of std::vector takes a value_type argument (defaulting to value_type()). So with std::vector<Eigen::Vector4f>, some Eigen::Vector4f objects will be passed by value, which discards any alignment modifiers, so a Eigen::Vector4f can be created at an unaligned location. In order to avoid that, the only solution we saw was to specialize std::vector to make it work on a slight modification of, here, Eigen::Vector4f, that is able to deal properly with this situation.
</span>
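A complete usage sketch of the alternative described above, modeled on the new test/stdvector_overload.cpp added later in this patch; Vector4f and the element counts are arbitrary choices:

#include <Eigen/StdVector>
#include <Eigen/Core>
#include <vector>
using namespace Eigen;

// must appear before any use of std::vector<Vector4f> with the default allocator
EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Vector4f)

int main()
{
  // thanks to the specialization, the default allocator is now safe for Vector4f
  std::vector<Vector4f> v(10, Vector4f::Zero());
  v.push_back(Vector4f::Ones());

  // the recommended, specialization-free alternative still works side by side
  std::vector<Vector4f, aligned_allocator<Vector4f> > w(10, Vector4f::Zero());

  return (v.size() == 11 && w.size() == 10) ? 0 : 1;
}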
diff --git a/doc/snippets/DenseBase_LinSpaced.cpp b/doc/snippets/DenseBase_LinSpaced.cpp
new file mode 100644
index 000000000..c8c3e972c
--- /dev/null
+++ b/doc/snippets/DenseBase_LinSpaced.cpp
@@ -0,0 +1,2 @@
+cout << VectorXi::LinSpaced(7,10,4).transpose() << endl;
+cout << VectorXd::LinSpaced(0.0,1.0,5).transpose() << endl;
diff --git a/doc/snippets/DenseBase_LinSpaced_seq.cpp b/doc/snippets/DenseBase_LinSpaced_seq.cpp
new file mode 100644
index 000000000..73873c4e9
--- /dev/null
+++ b/doc/snippets/DenseBase_LinSpaced_seq.cpp
@@ -0,0 +1,2 @@
+cout << VectorXi::LinSpaced(Sequential,7,10,4).transpose() << endl;
+cout << VectorXd::LinSpaced(Sequential,0.0,1.0,5).transpose() << endl;
diff --git a/doc/snippets/DenseBase_setLinSpaced.cpp b/doc/snippets/DenseBase_setLinSpaced.cpp
new file mode 100644
index 000000000..a8ea73407
--- /dev/null
+++ b/doc/snippets/DenseBase_setLinSpaced.cpp
@@ -0,0 +1,3 @@
+VectorXf v;
+v.setLinSpaced(0.5f,1.5f,5).transpose();
+cout << v << endl;
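These three snippets rely on the (low, high, size) argument order exercised by test/nullary.cpp further down; a small self-contained check of the expected values, which follow from step = (high - low)/(size - 1):

#include <Eigen/Core>
#include <iostream>
using namespace Eigen;

int main()
{
  std::cout << VectorXi::LinSpaced(7, 10, 4).transpose() << "\n";    // 7 8 9 10
  std::cout << VectorXd::LinSpaced(0.0, 1.0, 5).transpose() << "\n"; // 0 0.25 0.5 0.75 1
  return 0;
}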
diff --git a/scripts/eigen_gen_docs b/scripts/eigen_gen_docs
index f6727eff1..4c9214e51 100644
--- a/scripts/eigen_gen_docs
+++ b/scripts/eigen_gen_docs
@@ -8,16 +8,12 @@ USER=${USER:-'orzel'}
# step 1 : build
# todo if 'build is not there, create one:
#mkdir build
-(cd build && cmake .. && make -j3 doc) || echo "make failed"; exit 1
+(cd build && cmake .. && make -j3 doc) || { echo "make failed"; exit 1; }
#todo: n+1 where n = number of cpus
#step 2 : upload
-BRANCH=`hg branch`
-if [ $BRANCH == "default" ]
-then
- BRANCH='devel'
-fi
# (the '/' at the end of path are very important, see rsync documentation)
-rsync -az build/doc/html/ $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/dox-$BRANCH/ || (echo "upload failed"; exit 1)
+rsync -az build/doc/html/ $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/dox-devel/ || { echo "upload failed"; exit 1; }
+echo "Uploaded successfully"
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 6dd0d6916..b0da2a1d8 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -5,6 +5,11 @@ add_dependencies(check buildtests)
option(EIGEN_SPLIT_LARGE_TESTS "Split large tests into smaller executables" ON)
+option(EIGEN_DEFAULT_TO_ROW_MAJOR "Use row-major as default matrix storage order" OFF)
+if(EIGEN_DEFAULT_TO_ROW_MAJOR)
+ add_definitions("-DEIGEN_DEFAULT_TO_ROW_MAJOR")
+endif()
+
find_package(GSL)
if(GSL_FOUND AND GSL_VERSION_MINOR LESS 9)
set(GSL_FOUND "")
@@ -147,6 +152,7 @@ ei_add_test(geo_parametrizedline)
ei_add_test(geo_alignedbox)
ei_add_test(regression)
ei_add_test(stdvector)
+ei_add_test(stdvector_overload)
ei_add_test(resize)
if(QT4_FOUND)
ei_add_test(qtvector " " "${QT_QTCORE_LIBRARY}")
@@ -162,6 +168,7 @@ ei_add_test(conservative_resize)
ei_add_test(permutationmatrices)
ei_add_test(eigen2support)
ei_add_test(nullary)
+ei_add_test(nesting_ops "${CMAKE_CXX_FLAGS_DEBUG}")
ei_add_test(prec_inverse_4x4)
diff --git a/test/array.cpp b/test/array.cpp
index 8a71b4020..e51dbac2a 100644
--- a/test/array.cpp
+++ b/test/array.cpp
@@ -132,6 +132,30 @@ template<typename MatrixType> void comparisons(const MatrixType& m)
VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).rowwise().count(), ArrayXi::Constant(rows, cols));
}
+template<typename MatrixType> void array_real(const MatrixType& m)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ int rows = m.rows();
+ int cols = m.cols();
+
+ MatrixType m1 = MatrixType::Random(rows, cols),
+ m2 = MatrixType::Random(rows, cols),
+ m3(rows, cols);
+
+ VERIFY_IS_APPROX(m1.sin(), std::sin(m1));
+ VERIFY_IS_APPROX(m1.sin(), ei_sin(m1));
+ VERIFY_IS_APPROX(m1.cos(), ei_cos(m1));
+ VERIFY_IS_APPROX(m1.cos(), ei_cos(m1));
+ VERIFY_IS_APPROX(m1.abs().sqrt(), std::sqrt(std::abs(m1)));
+ VERIFY_IS_APPROX(m1.abs().sqrt(), ei_sqrt(ei_abs(m1)));
+ VERIFY_IS_APPROX(m1.abs().log(), std::log(std::abs(m1)));
+ VERIFY_IS_APPROX(m1.abs().log(), ei_log(ei_abs(m1)));
+ VERIFY_IS_APPROX(m1.exp(), std::exp(m1));
+ VERIFY_IS_APPROX(m1.exp(), ei_exp(m1));
+}
+
void test_array()
{
for(int i = 0; i < g_repeat; i++) {
@@ -149,4 +173,10 @@ void test_array()
CALL_SUBTEST_5( comparisons(ArrayXXf(8, 12)) );
CALL_SUBTEST_6( comparisons(ArrayXXi(8, 12)) );
}
+ for(int i = 0; i < g_repeat; i++) {
+ CALL_SUBTEST_1( array_real(Array<float, 1, 1>()) );
+ CALL_SUBTEST_2( array_real(Array22f()) );
+ CALL_SUBTEST_3( array_real(Array44d()) );
+ CALL_SUBTEST_5( array_real(ArrayXXf(8, 12)) );
+ }
}
diff --git a/test/first_aligned.cpp b/test/first_aligned.cpp
index 3cf1a7eef..f00ed57a5 100644
--- a/test/first_aligned.cpp
+++ b/test/first_aligned.cpp
@@ -50,9 +50,9 @@ void test_first_aligned()
test_first_aligned_helper(array_float+5, 50);
EIGEN_ALIGN16 double array_double[100];
- test_first_aligned_helper(array_float, 50);
- test_first_aligned_helper(array_float+1, 50);
- test_first_aligned_helper(array_float+2, 50);
+ test_first_aligned_helper(array_double, 50);
+ test_first_aligned_helper(array_double+1, 50);
+ test_first_aligned_helper(array_double+2, 50);
double *array_double_plus_4_bytes = (double*)(size_t(array_double)+4);
test_none_aligned_helper(array_double_plus_4_bytes, 50);
diff --git a/test/geo_alignedbox.cpp b/test/geo_alignedbox.cpp
index d496578da..5233d602a 100644
--- a/test/geo_alignedbox.cpp
+++ b/test/geo_alignedbox.cpp
@@ -27,6 +27,9 @@
#include <Eigen/LU>
#include <Eigen/QR>
+#include<iostream>
+using namespace std;
+
template<typename BoxType> void alignedbox(const BoxType& _box)
{
/* this test covers the following files:
@@ -40,6 +43,8 @@ template<typename BoxType> void alignedbox(const BoxType& _box)
VectorType p0 = VectorType::Random(dim);
VectorType p1 = VectorType::Random(dim);
+ while( p1 == p0 ){
+ p1 = VectorType::Random(dim); }
RealScalar s1 = ei_random<RealScalar>(0,1);
BoxType b0(dim);
@@ -49,34 +54,131 @@ template<typename BoxType> void alignedbox(const BoxType& _box)
b0.extend(p0);
b0.extend(p1);
VERIFY(b0.contains(p0*s1+(Scalar(1)-s1)*p1));
- VERIFY(!b0.contains(p0 + (1+s1)*(p1-p0)));
+ VERIFY(!b0.contains(p0 + (2+s1)*(p1-p0)));
(b2 = b0).extend(b1);
VERIFY(b2.contains(b0));
VERIFY(b2.contains(b1));
VERIFY_IS_APPROX(b2.clamp(b0), b0);
+
+ // alignment -- make sure there is no memory alignment assertion
+ BoxType *bp0 = new BoxType(dim);
+ BoxType *bp1 = new BoxType(dim);
+ bp0->extend(*bp1);
+ delete bp0;
+ delete bp1;
+
+ // sampling
+ for( int i=0; i<10; ++i )
+ {
+ VectorType r = b0.sample();
+ VERIFY(b0.contains(r));
+ }
+
+}
+
+
+
+template<typename BoxType>
+void alignedboxCastTests(const BoxType& _box)
+{
// casting
+ const int dim = _box.dim();
+ typedef typename BoxType::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef Matrix<Scalar, BoxType::AmbientDimAtCompileTime, 1> VectorType;
+
+ VectorType p0 = VectorType::Random(dim);
+ VectorType p1 = VectorType::Random(dim);
+
+ BoxType b0(dim);
+
+ b0.extend(p0);
+ b0.extend(p1);
+
const int Dim = BoxType::AmbientDimAtCompileTime;
typedef typename GetDifferentType<Scalar>::type OtherScalar;
AlignedBox<OtherScalar,Dim> hp1f = b0.template cast<OtherScalar>();
VERIFY_IS_APPROX(hp1f.template cast<Scalar>(),b0);
AlignedBox<Scalar,Dim> hp1d = b0.template cast<Scalar>();
VERIFY_IS_APPROX(hp1d.template cast<Scalar>(),b0);
+}
- // alignment -- make sure there is no memory alignment assertion
- BoxType *bp0 = new BoxType(dim);
- BoxType *bp1 = new BoxType(dim);
- bp0->extend(*bp1);
- delete bp0;
- delete bp1;
+
+void specificTest1()
+{
+ Vector2f m; m << -1.0f, -2.0f;
+ Vector2f M; M << 1.0f, 5.0f;
+
+ typedef AlignedBox<float,2> BoxType;
+ BoxType box( m, M );
+
+ Vector2f sides = M-m;
+ VERIFY_IS_APPROX(sides, box.sizes() );
+ VERIFY_IS_APPROX(sides[1], box.sizes()[1] );
+ VERIFY_IS_APPROX(sides[1], box.sizes().maxCoeff() );
+ VERIFY_IS_APPROX(sides[0], box.sizes().minCoeff() );
+
+ VERIFY_IS_APPROX( 14.0f, box.volume() );
+ VERIFY_IS_APPROX( 53.0f, box.diagonal().squaredNorm() );
+ VERIFY_IS_APPROX( ei_sqrt( 53.0f ), box.diagonal().norm() );
+
+ VERIFY_IS_APPROX( m, box.corner( BoxType::BottomLeft ) );
+ VERIFY_IS_APPROX( M, box.corner( BoxType::TopRight ) );
+ Vector2f bottomRight; bottomRight << M[0], m[1];
+ Vector2f topLeft; topLeft << m[0], M[1];
+ VERIFY_IS_APPROX( bottomRight, box.corner( BoxType::BottomRight ) );
+ VERIFY_IS_APPROX( topLeft, box.corner( BoxType::TopLeft ) );
}
+
+void specificTest2()
+{
+ Vector3i m; m << -1, -2, 0;
+ Vector3i M; M << 1, 5, 3;
+
+ typedef AlignedBox<int,3> BoxType;
+ BoxType box( m, M );
+
+ Vector3i sides = M-m;
+ VERIFY_IS_APPROX(sides, box.sizes() );
+ VERIFY_IS_APPROX(sides[1], box.sizes()[1] );
+ VERIFY_IS_APPROX(sides[1], box.sizes().maxCoeff() );
+ VERIFY_IS_APPROX(sides[0], box.sizes().minCoeff() );
+
+ VERIFY_IS_APPROX( 42, box.volume() );
+ VERIFY_IS_APPROX( 62, box.diagonal().squaredNorm() );
+
+ VERIFY_IS_APPROX( m, box.corner( BoxType::BottomLeftFloor ) );
+ VERIFY_IS_APPROX( M, box.corner( BoxType::TopRightCeil ) );
+ Vector3i bottomRightFloor; bottomRightFloor << M[0], m[1], m[2];
+ Vector3i topLeftFloor; topLeftFloor << m[0], M[1], m[2];
+ VERIFY_IS_APPROX( bottomRightFloor, box.corner( BoxType::BottomRightFloor ) );
+ VERIFY_IS_APPROX( topLeftFloor, box.corner( BoxType::TopLeftFloor ) );
+}
+
+
void test_geo_alignedbox()
{
- for(int i = 0; i < g_repeat; i++) {
+ for(int i = 0; i < g_repeat; i++)
+ {
CALL_SUBTEST_1( alignedbox(AlignedBox<float,2>()) );
- CALL_SUBTEST_2( alignedbox(AlignedBox<float,3>()) );
- CALL_SUBTEST_3( alignedbox(AlignedBox<double,4>()) );
+ CALL_SUBTEST_2( alignedboxCastTests(AlignedBox<float,2>()) );
+
+ CALL_SUBTEST_3( alignedbox(AlignedBox<float,3>()) );
+ CALL_SUBTEST_4( alignedboxCastTests(AlignedBox<float,3>()) );
+
+ CALL_SUBTEST_5( alignedbox(AlignedBox<double,4>()) );
+ CALL_SUBTEST_6( alignedboxCastTests(AlignedBox<double,4>()) );
+
+ CALL_SUBTEST_7( alignedbox(AlignedBox<double,1>()) );
+ CALL_SUBTEST_8( alignedboxCastTests(AlignedBox<double,1>()) );
+
+ CALL_SUBTEST_9( alignedbox(AlignedBox<int,1>()) );
+ CALL_SUBTEST_10( alignedbox(AlignedBox<int,2>()) );
+ CALL_SUBTEST_11( alignedbox(AlignedBox<int,3>()) );
}
+ CALL_SUBTEST_12( specificTest1() );
+ CALL_SUBTEST_13( specificTest2() );
}
diff --git a/test/geo_hyperplane.cpp b/test/geo_hyperplane.cpp
index 1fd1e281a..cd19698bc 100644
--- a/test/geo_hyperplane.cpp
+++ b/test/geo_hyperplane.cpp
@@ -136,6 +136,6 @@ void test_geo_hyperplane()
CALL_SUBTEST_3( hyperplane(Hyperplane<double,4>()) );
CALL_SUBTEST_4( hyperplane(Hyperplane<std::complex<double>,5>()) );
CALL_SUBTEST_1( lines<float>() );
- CALL_SUBTEST_2( lines<double>() );
+ CALL_SUBTEST_3( lines<double>() );
}
}
diff --git a/test/geo_quaternion.cpp b/test/geo_quaternion.cpp
index 052c01fbf..311ffd2ec 100644
--- a/test/geo_quaternion.cpp
+++ b/test/geo_quaternion.cpp
@@ -59,6 +59,9 @@ template<typename Scalar> void quaternion(void)
q1.coeffs().setRandom();
VERIFY_IS_APPROX(q1.coeffs(), (q1*q2).coeffs());
+ // concatenation
+ q1 *= q2;
+
q1 = AngleAxisx(a, v0.normalized());
q2 = AngleAxisx(a, v1.normalized());
diff --git a/test/geo_transformations.cpp b/test/geo_transformations.cpp
index fc542e71b..895fe0f08 100644
--- a/test/geo_transformations.cpp
+++ b/test/geo_transformations.cpp
@@ -85,6 +85,10 @@ template<typename Scalar, int Mode> void transformations(void)
VERIFY_IS_APPROX(q1 * v1, Quaternionx(aa) * v1);
VERIFY_IS_NOT_APPROX(q1 * v1, Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1);
+ aa.fromRotationMatrix(aa.toRotationMatrix());
+ VERIFY_IS_APPROX(q1 * v1, Quaternionx(aa) * v1);
+ VERIFY_IS_NOT_APPROX(q1 * v1, Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1);
+
// AngleAxis
VERIFY_IS_APPROX(AngleAxisx(a,v1.normalized()).toRotationMatrix(),
Quaternionx(AngleAxisx(a,v1.normalized())).toRotationMatrix());
diff --git a/test/inverse.cpp b/test/inverse.cpp
index b80e139e0..713caf4a6 100644
--- a/test/inverse.cpp
+++ b/test/inverse.cpp
@@ -96,5 +96,6 @@ void test_inverse()
CALL_SUBTEST_5( inverse(MatrixXf(s,s)) );
s = ei_random<int>(25,100);
CALL_SUBTEST_6( inverse(MatrixXcd(s,s)) );
+ CALL_SUBTEST_7( inverse(Matrix4d()) );
}
}
diff --git a/test/lu.cpp b/test/lu.cpp
index c2237febf..45308ff82 100644
--- a/test/lu.cpp
+++ b/test/lu.cpp
@@ -35,7 +35,7 @@ template<typename MatrixType> void lu_non_invertible()
int rows, cols, cols2;
if(MatrixType::RowsAtCompileTime==Dynamic)
{
- rows = ei_random<int>(20,200);
+ rows = ei_random<int>(2,200);
}
else
{
@@ -43,8 +43,8 @@ template<typename MatrixType> void lu_non_invertible()
}
if(MatrixType::ColsAtCompileTime==Dynamic)
{
- cols = ei_random<int>(20,200);
- cols2 = ei_random<int>(20,200);
+ cols = ei_random<int>(2,200);
+ cols2 = ei_random<int>(2,200);
}
else
{
@@ -108,7 +108,7 @@ template<typename MatrixType> void lu_invertible()
LU.h
*/
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- int size = ei_random<int>(10,200);
+ int size = ei_random<int>(1,200);
MatrixType m1(size, size), m2(size, size), m3(size, size);
m1 = MatrixType::Random(size,size);
@@ -185,5 +185,7 @@ void test_lu()
CALL_SUBTEST_6( lu_non_invertible<MatrixXcd>() );
CALL_SUBTEST_6( lu_invertible<MatrixXcd>() );
CALL_SUBTEST_6( lu_verify_assert<MatrixXcd>() );
+
+ CALL_SUBTEST_7(( lu_non_invertible<Matrix<float,Dynamic,16> >() ));
}
}
diff --git a/test/map.cpp b/test/map.cpp
index 14c0393db..603b6159b 100644
--- a/test/map.cpp
+++ b/test/map.cpp
@@ -24,7 +24,7 @@
#include "main.h"
-template<typename VectorType> void map_class(const VectorType& m)
+template<typename VectorType> void map_class_vector(const VectorType& m)
{
typedef typename VectorType::Scalar Scalar;
@@ -51,6 +51,34 @@ template<typename VectorType> void map_class(const VectorType& m)
delete[] array3;
}
+template<typename MatrixType> void map_class_matrix(const MatrixType& m)
+{
+ typedef typename MatrixType::Scalar Scalar;
+
+ int rows = m.rows(), cols = m.cols(), size = rows*cols;
+
+ // test Map.h
+ Scalar* array1 = ei_aligned_new<Scalar>(size);
+ for(int i = 0; i < size; i++) array1[i] = Scalar(1);
+ Scalar* array2 = ei_aligned_new<Scalar>(size);
+ for(int i = 0; i < size; i++) array2[i] = Scalar(1);
+ Scalar* array3 = new Scalar[size+1];
+ for(int i = 0; i < size+1; i++) array3[i] = Scalar(1);
+ Scalar* array3unaligned = size_t(array3)%16 == 0 ? array3+1 : array3;
+ Map<MatrixType, Aligned>(array1, rows, cols) = MatrixType::Ones(rows,cols);
+ Map<MatrixType>(array2, rows, cols) = Map<MatrixType>(array1, rows, cols);
+ Map<MatrixType>(array3unaligned, rows, cols) = Map<MatrixType>(array1, rows, cols);
+ MatrixType ma1 = Map<MatrixType>(array1, rows, cols);
+ MatrixType ma2 = Map<MatrixType, Aligned>(array2, rows, cols);
+ VERIFY_IS_APPROX(ma1, ma2);
+ MatrixType ma3 = Map<MatrixType>(array3unaligned, rows, cols);
+ VERIFY_IS_APPROX(ma1, ma3);
+
+ ei_aligned_delete(array1, size);
+ ei_aligned_delete(array2, size);
+ delete[] array3;
+}
+
template<typename VectorType> void map_static_methods(const VectorType& m)
{
typedef typename VectorType::Scalar Scalar;
@@ -81,11 +109,17 @@ template<typename VectorType> void map_static_methods(const VectorType& m)
void test_map()
{
for(int i = 0; i < g_repeat; i++) {
- CALL_SUBTEST_1( map_class(Matrix<float, 1, 1>()) );
- CALL_SUBTEST_2( map_class(Vector4d()) );
- CALL_SUBTEST_3( map_class(RowVector4f()) );
- CALL_SUBTEST_4( map_class(VectorXcf(8)) );
- CALL_SUBTEST_5( map_class(VectorXi(12)) );
+ CALL_SUBTEST_1( map_class_vector(Matrix<float, 1, 1>()) );
+ CALL_SUBTEST_2( map_class_vector(Vector4d()) );
+ CALL_SUBTEST_3( map_class_vector(RowVector4f()) );
+ CALL_SUBTEST_4( map_class_vector(VectorXcf(8)) );
+ CALL_SUBTEST_5( map_class_vector(VectorXi(12)) );
+
+ CALL_SUBTEST_1( map_class_matrix(Matrix<float, 1, 1>()) );
+ CALL_SUBTEST_2( map_class_matrix(Matrix4d()) );
+ CALL_SUBTEST_11( map_class_matrix(Matrix<float,3,5>()) );
+ CALL_SUBTEST_4( map_class_matrix(MatrixXcf(ei_random<int>(1,10),ei_random<int>(1,10))) );
+ CALL_SUBTEST_5( map_class_matrix(MatrixXi(ei_random<int>(1,10),ei_random<int>(1,10))) );
CALL_SUBTEST_6( map_static_methods(Matrix<double, 1, 1>()) );
CALL_SUBTEST_7( map_static_methods(Vector3f()) );
diff --git a/test/nesting_ops.cpp b/test/nesting_ops.cpp
new file mode 100644
index 000000000..831c71da7
--- /dev/null
+++ b/test/nesting_ops.cpp
@@ -0,0 +1,56 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#include "main.h"
+
+template <typename MatrixType> void run_nesting_ops(const MatrixType& _m)
+{
+ typename MatrixType::Nested m(_m);
+ typedef typename MatrixType::Scalar Scalar;
+
+#ifdef NDEBUG
+ const bool is_debug = false;
+#else
+ const bool is_debug = true;
+#endif
+
+ // Make really sure that we are in debug mode! We don't want any type of
+ // inlining for these tests to pass.
+ VERIFY(is_debug);
+
+ // The only intention of these tests is to ensure that this code does
+ // not trigger any asserts or segmentation faults... more to come.
+ VERIFY( (m.transpose() * m).diagonal().sum() == (m.transpose() * m).diagonal().sum() );
+ VERIFY( (m.transpose() * m).diagonal().array().abs().sum() == (m.transpose() * m).diagonal().array().abs().sum() );
+
+ VERIFY( (m.transpose() * m).array().abs().sum() == (m.transpose() * m).array().abs().sum() );
+}
+
+void test_nesting_ops()
+{
+ CALL_SUBTEST_1(run_nesting_ops(MatrixXf::Random(25,25)));
+ CALL_SUBTEST_2(run_nesting_ops(MatrixXd::Random(25,25)));
+ CALL_SUBTEST_3(run_nesting_ops(Matrix4f::Random()));
+ CALL_SUBTEST_4(run_nesting_ops(Matrix4d::Random()));
+}
diff --git a/test/nullary.cpp b/test/nullary.cpp
index a7f6c60a7..3adfc33fe 100644
--- a/test/nullary.cpp
+++ b/test/nullary.cpp
@@ -46,6 +46,54 @@ bool equalsIdentity(const MatrixType& A)
return offDiagOK && diagOK;
}
+template<typename VectorType>
+void testVectorType(const VectorType& base)
+{
+ typedef typename ei_traits<VectorType>::Scalar Scalar;
+ Scalar low = ei_random<Scalar>(-500,500);
+ Scalar high = ei_random<Scalar>(-500,500);
+ if (low>high) std::swap(low,high);
+ const int size = base.size();
+ const Scalar step = (high-low)/(size-1);
+
+ // check whether the result yields what we expect it to do
+ VectorType m(base);
+ m.setLinSpaced(low,high,size);
+
+ VectorType n(size);
+ for (int i=0; i<size; ++i)
+ n(i) = low+i*step;
+
+ VERIFY( (m-n).norm() < std::numeric_limits<Scalar>::epsilon()*10e3 );
+
+ // random access version
+ m = VectorType::LinSpaced(low,high,size);
+ VERIFY( (m-n).norm() < std::numeric_limits<Scalar>::epsilon()*10e3 );
+
+ // These guys sometimes fail! This is not good. Any ideas how to fix them!?
+ //VERIFY( m(m.size()-1) == high );
+ //VERIFY( m(0) == low );
+
+ // sequential access version
+ m = VectorType::LinSpaced(Sequential,low,high,size);
+ VERIFY( (m-n).norm() < std::numeric_limits<Scalar>::epsilon()*10e3 );
+
+ // These guys sometimes fail! This is not good. Any ideas how to fix them!?
+ //VERIFY( m(m.size()-1) == high );
+ //VERIFY( m(0) == low );
+
+ // check whether everything works with row and col major vectors
+ Matrix<Scalar,Dynamic,1> row_vector(size);
+ Matrix<Scalar,1,Dynamic> col_vector(size);
+ row_vector.setLinSpaced(low,high,size);
+ col_vector.setLinSpaced(low,high,size);
+ VERIFY( (row_vector-col_vector.transpose()).norm() < 1e-10 );
+
+ Matrix<Scalar,Dynamic,1> size_changer(size+50);
+ size_changer.setLinSpaced(low,high,size);
+ VERIFY( size_changer.size() == size );
+}
+
template<typename MatrixType>
void testMatrixType(const MatrixType& m)
{
@@ -63,4 +111,10 @@ void test_nullary()
CALL_SUBTEST_1( testMatrixType(Matrix2d()) );
CALL_SUBTEST_2( testMatrixType(MatrixXcf(50,50)) );
CALL_SUBTEST_3( testMatrixType(MatrixXf(5,7)) );
+ CALL_SUBTEST_4( testVectorType(VectorXd(51)) );
+ CALL_SUBTEST_5( testVectorType(VectorXd(41)) );
+ CALL_SUBTEST_6( testVectorType(Vector3d()) );
+ CALL_SUBTEST_7( testVectorType(VectorXf(51)) );
+ CALL_SUBTEST_8( testVectorType(VectorXf(41)) );
+ CALL_SUBTEST_9( testVectorType(Vector3f()) );
}
diff --git a/test/prec_inverse_4x4.cpp b/test/prec_inverse_4x4.cpp
index e1b05aa0d..59ccbbcaf 100644
--- a/test/prec_inverse_4x4.cpp
+++ b/test/prec_inverse_4x4.cpp
@@ -35,7 +35,7 @@ template<typename MatrixType> void inverse_permutation_4x4()
{
MatrixType m = PermutationMatrix<4>(indices);
MatrixType inv = m.inverse();
- double error = double( (m*inv-MatrixType::Identity()).norm() / epsilon<Scalar>() );
+ double error = double( (m*inv-MatrixType::Identity()).norm() / NumTraits<Scalar>::epsilon() );
VERIFY(error == 0.0);
std::next_permutation(indices.data(),indices.data()+4);
}
@@ -53,9 +53,9 @@ template<typename MatrixType> void inverse_general_4x4(int repeat)
do {
m = MatrixType::Random();
absdet = ei_abs(m.determinant());
- } while(absdet < epsilon<Scalar>());
+ } while(absdet < NumTraits<Scalar>::epsilon());
MatrixType inv = m.inverse();
- double error = double( (m*inv-MatrixType::Identity()).norm() * absdet / epsilon<Scalar>() );
+ double error = double( (m*inv-MatrixType::Identity()).norm() * absdet / NumTraits<Scalar>::epsilon() );
error_sum += error;
error_max = std::max(error_max, error);
}
diff --git a/test/product.h b/test/product.h
index 85afbe1e4..f6109fae4 100644
--- a/test/product.h
+++ b/test/product.h
@@ -26,7 +26,7 @@
#include <Eigen/QR>
template<typename Derived1, typename Derived2>
-bool areNotApprox(const MatrixBase<Derived1>& m1, const MatrixBase<Derived2>& m2, typename Derived1::RealScalar epsilon = dummy_precision<typename Derived1::RealScalar>())
+bool areNotApprox(const MatrixBase<Derived1>& m1, const MatrixBase<Derived2>& m2, typename Derived1::RealScalar epsilon = NumTraits<typename Derived1::RealScalar>::dummy_precision())
{
return !((m1-m2).cwiseAbs2().maxCoeff() < epsilon * epsilon
* std::max(m1.cwiseAbs2().maxCoeff(), m2.cwiseAbs2().maxCoeff()));
diff --git a/test/product_notemporary.cpp b/test/product_notemporary.cpp
index b8bc3acc3..9084cde6b 100644
--- a/test/product_notemporary.cpp
+++ b/test/product_notemporary.cpp
@@ -24,10 +24,7 @@
static int nb_temporaries;
-#define EIGEN_DEBUG_MATRIX_CTOR { \
- if(SizeAtCompileTime==Dynamic) \
- nb_temporaries++; \
- }
+#define EIGEN_DEBUG_MATRIX_CTOR { if(size!=0) nb_temporaries++; }
#include "main.h"
@@ -40,10 +37,11 @@ static int nb_temporaries;
template<typename MatrixType> void product_notemporary(const MatrixType& m)
{
- /* This test checks the number of tempories created
+ /* This test checks the number of temporaries created
* during the evaluation of a complex expression */
typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
typedef Matrix<Scalar, 1, Dynamic> RowVectorType;
typedef Matrix<Scalar, Dynamic, 1> ColVectorType;
typedef Matrix<Scalar, Dynamic, Dynamic, RowMajor> RowMajorMatrixType;
@@ -105,10 +103,22 @@ template<typename MatrixType> void product_notemporary(const MatrixType& m)
VERIFY_EVALUATION_COUNT( m3.template selfadjointView<Lower>().rankUpdate(m2.adjoint()), 0);
+ // Here we will get 1 temporary for each resize operation of the lhs operator; resize(r1,c1) would lead to zero temporaries
m3.resize(1,1);
- VERIFY_EVALUATION_COUNT( m3.noalias() = m1.block(r0,r0,r1,r1).template selfadjointView<Lower>() * m2.block(r0,c0,r1,c1), 0);
+ VERIFY_EVALUATION_COUNT( m3.noalias() = m1.block(r0,r0,r1,r1).template selfadjointView<Lower>() * m2.block(r0,c0,r1,c1), 1);
m3.resize(1,1);
- VERIFY_EVALUATION_COUNT( m3.noalias() = m1.block(r0,r0,r1,r1).template triangularView<UnitUpper>() * m2.block(r0,c0,r1,c1), 0);
+ VERIFY_EVALUATION_COUNT( m3.noalias() = m1.block(r0,r0,r1,r1).template triangularView<UnitUpper>() * m2.block(r0,c0,r1,c1), 1);
+
+ // Zero temporaries for lazy products ...
+ VERIFY_EVALUATION_COUNT( Scalar tmp = 0; tmp += Scalar(RealScalar(1)) / (m3.transpose().lazyProduct(m3)).diagonal().sum(), 0 );
+
+ // ... and even no temporary for even deeply (>=2) nested products
+ VERIFY_EVALUATION_COUNT( Scalar tmp = 0; tmp += Scalar(RealScalar(1)) / (m3.transpose() * m3).diagonal().sum(), 0 );
+ VERIFY_EVALUATION_COUNT( Scalar tmp = 0; tmp += Scalar(RealScalar(1)) / (m3.transpose() * m3).diagonal().array().abs().sum(), 0 );
+
+ // Zero temporaries for ... CoeffBasedProductMode
+  // - does not work with GCC because of the <..>, we'd need variadic macros ...
+ //VERIFY_EVALUATION_COUNT( m3.col(0).head<5>() * m3.col(0).transpose() + m3.col(0).head<5>() * m3.col(0).transpose(), 0 );
}
void test_product_notemporary()
diff --git a/test/redux.cpp b/test/redux.cpp
index 3293fd54e..2dc4dcc45 100644
--- a/test/redux.cpp
+++ b/test/redux.cpp
@@ -27,6 +27,7 @@
template<typename MatrixType> void matrixRedux(const MatrixType& m)
{
typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
int rows = m.rows();
int cols = m.cols();
@@ -44,7 +45,10 @@ template<typename MatrixType> void matrixRedux(const MatrixType& m)
minc = std::min(ei_real(minc), ei_real(m1(i,j)));
maxc = std::max(ei_real(maxc), ei_real(m1(i,j)));
}
+ const Scalar mean = s/Scalar(RealScalar(rows*cols));
+
VERIFY_IS_APPROX(m1.sum(), s);
+ VERIFY_IS_APPROX(m1.mean(), mean);
VERIFY_IS_APPROX(m1.prod(), p);
VERIFY_IS_APPROX(m1.real().minCoeff(), ei_real(minc));
VERIFY_IS_APPROX(m1.real().maxCoeff(), ei_real(maxc));
@@ -113,15 +117,24 @@ void test_redux()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( matrixRedux(Matrix<float, 1, 1>()) );
+ CALL_SUBTEST_1( matrixRedux(Array<float, 1, 1>()) );
CALL_SUBTEST_2( matrixRedux(Matrix2f()) );
+ CALL_SUBTEST_2( matrixRedux(Array2f()) );
CALL_SUBTEST_3( matrixRedux(Matrix4d()) );
+ CALL_SUBTEST_3( matrixRedux(Array4d()) );
CALL_SUBTEST_4( matrixRedux(MatrixXcf(3, 3)) );
+ CALL_SUBTEST_4( matrixRedux(ArrayXXcf(3, 3)) );
CALL_SUBTEST_5( matrixRedux(MatrixXd(8, 12)) );
+ CALL_SUBTEST_5( matrixRedux(ArrayXXd(8, 12)) );
CALL_SUBTEST_6( matrixRedux(MatrixXi(8, 12)) );
+ CALL_SUBTEST_6( matrixRedux(ArrayXXi(8, 12)) );
}
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_7( vectorRedux(Vector4f()) );
+ CALL_SUBTEST_7( vectorRedux(Array4f()) );
CALL_SUBTEST_5( vectorRedux(VectorXd(10)) );
+ CALL_SUBTEST_5( vectorRedux(ArrayXd(10)) );
CALL_SUBTEST_8( vectorRedux(VectorXf(33)) );
+ CALL_SUBTEST_8( vectorRedux(ArrayXf(33)) );
}
}
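
The new redux check simply verifies that the freshly added mean() reduction agrees with sum() divided by the number of coefficients. A small self-contained sketch of that relation; the sizes used are illustrative:

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::MatrixXd m1 = Eigen::MatrixXd::Random(8, 12);
  const double mean = m1.sum() / double(m1.rows() * m1.cols());
  // Should print a value on the order of machine precision.
  std::cout << m1.mean() - mean << std::endl;
  return 0;
}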
diff --git a/test/stdvector_overload.cpp b/test/stdvector_overload.cpp
new file mode 100644
index 000000000..86f4308ad
--- /dev/null
+++ b/test/stdvector_overload.cpp
@@ -0,0 +1,176 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#include "main.h"
+
+#include <Eigen/StdVector>
+#include <Eigen/Geometry>
+
+EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Vector4f)
+
+EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Matrix2f)
+EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Matrix4f)
+EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Matrix4d)
+
+EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Transform3f)
+EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Transform3d)
+
+EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Quaternionf)
+EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Quaterniond)
+
+template<typename MatrixType>
+void check_stdvector_matrix(const MatrixType& m)
+{
+ int rows = m.rows();
+ int cols = m.cols();
+ MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols);
+ std::vector<MatrixType> v(10, MatrixType(rows,cols)), w(20, y);
+ v[5] = x;
+ w[6] = v[5];
+ VERIFY_IS_APPROX(w[6], v[5]);
+ v = w;
+ for(int i = 0; i < 20; i++)
+ {
+ VERIFY_IS_APPROX(w[i], v[i]);
+ }
+
+ v.resize(21);
+ v[20] = x;
+ VERIFY_IS_APPROX(v[20], x);
+ v.resize(22,y);
+ VERIFY_IS_APPROX(v[21], y);
+ v.push_back(x);
+ VERIFY_IS_APPROX(v[22], x);
+ VERIFY((size_t)&(v[22]) == (size_t)&(v[21]) + sizeof(MatrixType));
+
+ // do a lot of push_back such that the vector gets internally resized
+ // (with memory reallocation)
+ MatrixType* ref = &w[0];
+ for(int i=0; i<30 || ((ref==&w[0]) && i<300); ++i)
+ v.push_back(w[i%w.size()]);
+ for(unsigned int i=23; i<v.size(); ++i)
+ {
+ VERIFY(v[i]==w[(i-23)%w.size()]);
+ }
+}
+
+template<typename TransformType>
+void check_stdvector_transform(const TransformType&)
+{
+ typedef typename TransformType::MatrixType MatrixType;
+ TransformType x(MatrixType::Random()), y(MatrixType::Random());
+ std::vector<TransformType> v(10), w(20, y);
+ v[5] = x;
+ w[6] = v[5];
+ VERIFY_IS_APPROX(w[6], v[5]);
+ v = w;
+ for(int i = 0; i < 20; i++)
+ {
+ VERIFY_IS_APPROX(w[i], v[i]);
+ }
+
+ v.resize(21);
+ v[20] = x;
+ VERIFY_IS_APPROX(v[20], x);
+ v.resize(22,y);
+ VERIFY_IS_APPROX(v[21], y);
+ v.push_back(x);
+ VERIFY_IS_APPROX(v[22], x);
+ VERIFY((size_t)&(v[22]) == (size_t)&(v[21]) + sizeof(TransformType));
+
+ // do a lot of push_back such that the vector gets internally resized
+ // (with memory reallocation)
+ TransformType* ref = &w[0];
+ for(int i=0; i<30 || ((ref==&w[0]) && i<300); ++i)
+ v.push_back(w[i%w.size()]);
+ for(unsigned int i=23; i<v.size(); ++i)
+ {
+ VERIFY(v[i].matrix()==w[(i-23)%w.size()].matrix());
+ }
+}
+
+template<typename QuaternionType>
+void check_stdvector_quaternion(const QuaternionType&)
+{
+ typedef typename QuaternionType::Coefficients Coefficients;
+ QuaternionType x(Coefficients::Random()), y(Coefficients::Random());
+ std::vector<QuaternionType> v(10), w(20, y);
+ v[5] = x;
+ w[6] = v[5];
+ VERIFY_IS_APPROX(w[6], v[5]);
+ v = w;
+ for(int i = 0; i < 20; i++)
+ {
+ VERIFY_IS_APPROX(w[i], v[i]);
+ }
+
+ v.resize(21);
+ v[20] = x;
+ VERIFY_IS_APPROX(v[20], x);
+ v.resize(22,y);
+ VERIFY_IS_APPROX(v[21], y);
+ v.push_back(x);
+ VERIFY_IS_APPROX(v[22], x);
+ VERIFY((size_t)&(v[22]) == (size_t)&(v[21]) + sizeof(QuaternionType));
+
+ // do a lot of push_back such that the vector gets internally resized
+ // (with memory reallocation)
+ QuaternionType* ref = &w[0];
+ for(int i=0; i<30 || ((ref==&w[0]) && i<300); ++i)
+ v.push_back(w[i%w.size()]);
+ for(unsigned int i=23; i<v.size(); ++i)
+ {
+ VERIFY(v[i].coeffs()==w[(i-23)%w.size()].coeffs());
+ }
+}
+
+void test_stdvector_overload()
+{
+ // some non vectorizable fixed sizes
+ CALL_SUBTEST_1(check_stdvector_matrix(Vector2f()));
+ CALL_SUBTEST_1(check_stdvector_matrix(Matrix3f()));
+ CALL_SUBTEST_2(check_stdvector_matrix(Matrix3d()));
+
+ // some vectorizable fixed sizes
+ CALL_SUBTEST_1(check_stdvector_matrix(Matrix2f()));
+ CALL_SUBTEST_1(check_stdvector_matrix(Vector4f()));
+ CALL_SUBTEST_1(check_stdvector_matrix(Matrix4f()));
+ CALL_SUBTEST_2(check_stdvector_matrix(Matrix4d()));
+
+ // some dynamic sizes
+ CALL_SUBTEST_3(check_stdvector_matrix(MatrixXd(1,1)));
+ CALL_SUBTEST_3(check_stdvector_matrix(VectorXd(20)));
+ CALL_SUBTEST_3(check_stdvector_matrix(RowVectorXf(20)));
+ CALL_SUBTEST_3(check_stdvector_matrix(MatrixXcf(10,10)));
+
+ // some Transform
+ CALL_SUBTEST_4(check_stdvector_transform(Transform2f())); // does not need the specialization (2+1)^2 = 9
+ CALL_SUBTEST_4(check_stdvector_transform(Transform3f()));
+ CALL_SUBTEST_4(check_stdvector_transform(Transform3d()));
+
+ // some Quaternion
+ CALL_SUBTEST_5(check_stdvector_quaternion(Quaternionf()));
+ CALL_SUBTEST_5(check_stdvector_quaternion(Quaterniond()));
+}
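
The new test exercises std::vector holding fixed-size vectorizable Eigen types through the EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION macro from Eigen/StdVector. A minimal usage sketch along the lines of the test above; note that the specialization must appear at namespace scope before any std::vector<Vector4f> is instantiated:

#include <Eigen/StdVector>   // brings in the std::vector overload machinery
#include <Eigen/Core>
#include <vector>
using namespace Eigen;

// Make std::vector<Vector4f> safe for this 16-byte-aligned, fixed-size type.
EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Vector4f)

int main()
{
  std::vector<Vector4f> v(10, Vector4f::Zero());
  v.push_back(Vector4f::Ones());
  return v[10] == Vector4f::Ones() ? 0 : 1;
}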
diff --git a/test/submatrices.cpp b/test/submatrices.cpp
index 9cd6f3fab..d53fd4b6f 100644
--- a/test/submatrices.cpp
+++ b/test/submatrices.cpp
@@ -60,6 +60,7 @@ template<typename MatrixType> void submatrices(const MatrixType& m)
typedef typename MatrixType::RealScalar RealScalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime> RowVectorType;
+ typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> SquareMatrixType;
int rows = m.rows();
int cols = m.cols();
@@ -67,11 +68,9 @@ template<typename MatrixType> void submatrices(const MatrixType& m)
m2 = MatrixType::Random(rows, cols),
m3(rows, cols),
mzero = MatrixType::Zero(rows, cols),
- ones = MatrixType::Ones(rows, cols),
- identity = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime>
- ::Identity(rows, rows),
- square = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime>
- ::Random(rows, rows);
+ ones = MatrixType::Ones(rows, cols);
+ SquareMatrixType identity = SquareMatrixType::Identity(rows, rows),
+ square = SquareMatrixType::Random(rows, rows);
VectorType v1 = VectorType::Random(rows),
v2 = VectorType::Random(rows),
v3 = VectorType::Random(rows),
@@ -222,6 +221,8 @@ void test_submatrices()
CALL_SUBTEST_5( submatrices(MatrixXcd(20, 20)) );
CALL_SUBTEST_6( submatrices(MatrixXf(20, 20)) );
+ CALL_SUBTEST_8( submatrices(Matrix<float,Dynamic,4>(3, 4)) );
+
CALL_SUBTEST_6( data_and_stride(MatrixXf(ei_random(5,50), ei_random(5,50))) );
CALL_SUBTEST_7( data_and_stride(Matrix<int,Dynamic,Dynamic,RowMajor>(ei_random(5,50), ei_random(5,50))) );
}
diff --git a/unsupported/Eigen/AlignedVector3 b/unsupported/Eigen/AlignedVector3
index 37018bfc6..6e9772cf5 100644
--- a/unsupported/Eigen/AlignedVector3
+++ b/unsupported/Eigen/AlignedVector3
@@ -64,7 +64,8 @@ template<typename _Scalar> class AlignedVector3
CoeffType m_coeffs;
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(AlignedVector3)
+ typedef MatrixBase<AlignedVector3<_Scalar> > Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(AlignedVector3)
using Base::operator*;
inline int rows() const { return 3; }
@@ -188,7 +189,7 @@ template<typename _Scalar> class AlignedVector3
}
template<typename Derived>
- inline bool isApprox(const MatrixBase<Derived>& other, RealScalar eps=dummy_precision<Scalar>()) const
+ inline bool isApprox(const MatrixBase<Derived>& other, RealScalar eps=NumTraits<Scalar>::dummy_precision()) const
{
return m_coeffs.template head<3>().isApprox(other,eps);
}
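
The change above is part of a wider migration in this patch set: the free functions epsilon<Scalar>() and dummy_precision<Scalar>() give way to static members of NumTraits. A short sketch of the new spelling of a fuzzy comparison, with illustrative values:

#include <Eigen/Core>

int main()
{
  // New spelling of the default fuzzy-comparison threshold used by isApprox().
  const float eps = Eigen::NumTraits<float>::dummy_precision();
  Eigen::Vector3f a = Eigen::Vector3f::Random();
  return a.isApprox(a + Eigen::Vector3f::Constant(0.1f * eps), eps) ? 0 : 1;
}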
diff --git a/unsupported/Eigen/AutoDiff b/unsupported/Eigen/AutoDiff
index a8b3ea90a..97164525c 100644
--- a/unsupported/Eigen/AutoDiff
+++ b/unsupported/Eigen/AutoDiff
@@ -25,8 +25,6 @@
#ifndef EIGEN_AUTODIFF_MODULE
#define EIGEN_AUTODIFF_MODULE
-#include <Eigen/Array>
-
namespace Eigen {
/** \ingroup Unsupported_modules
diff --git a/unsupported/Eigen/FFT b/unsupported/Eigen/FFT
index 454308fb7..87c31749b 100644
--- a/unsupported/Eigen/FFT
+++ b/unsupported/Eigen/FFT
@@ -201,7 +201,7 @@ class FFT
{
m_impl.inv( dst,src,nfft );
if ( HasFlag( Unscaled ) == false)
- scale(dst,1./nfft,nfft); // scale the time series
+ scale(dst,Scalar(1./nfft),nfft); // scale the time series
}
inline
@@ -209,7 +209,7 @@ class FFT
{
m_impl.inv( dst,src,nfft );
if ( HasFlag( Unscaled ) == false)
- scale(dst,1./nfft,nfft); // scale the time series
+ scale(dst,Scalar(1./nfft),nfft); // scale the time series
}
template<typename OutputDerived, typename ComplexDerived>
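
Both FFT hunks make the same fix: the 1/nfft factor applied to the inverse transform is now wrapped in Scalar(...) so it is converted to the transform's scalar type. A hedged sketch of the round trip this scaling keeps consistent; the SetFlag(Unscaled) line is an assumption about the module's flag API and is left commented out:

#include <unsupported/Eigen/FFT>
#include <vector>
#include <complex>

int main()
{
  Eigen::FFT<float> fft;
  std::vector<float> timevec(64, 1.f);
  std::vector<std::complex<float> > freqvec;

  fft.fwd(freqvec, timevec);    // forward transform
  std::vector<float> roundtrip;
  fft.inv(roundtrip, freqvec);  // scaled by Scalar(1./nfft) internally, so roundtrip ~= timevec
  // fft.SetFlag(Eigen::FFT<float>::Unscaled);  // assumed flag API: skip the 1/nfft scaling
  return 0;
}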
diff --git a/unsupported/Eigen/NonLinearOptimization b/unsupported/Eigen/NonLinearOptimization
index f15e09386..149b658f8 100644
--- a/unsupported/Eigen/NonLinearOptimization
+++ b/unsupported/Eigen/NonLinearOptimization
@@ -25,6 +25,8 @@
#ifndef EIGEN_NONLINEAROPTIMIZATION_MODULE
#define EIGEN_NONLINEAROPTIMIZATION_MODULE
+#include <vector>
+
#include <Eigen/Core>
#include <Eigen/Jacobi>
#include <Eigen/QR>
@@ -54,7 +56,7 @@ namespace Eigen {
* fortran. Those implementations have been carefully tuned, tested, and used
* for several decades.
*
- * The original fortran code was automatically translated (using f2c) in C and
+ * The original fortran code was automatically translated into C using f2c (http://en.wikipedia.org/wiki/F2c),
* then c++, and then cleaned by several different authors.
* The last one of those cleanings being our starting point :
* http://devernay.free.fr/hacks/cminpack.html
@@ -108,19 +110,23 @@ namespace Eigen {
* handle the loop: init + loop until a stop condition is met. Those are provided for
* convenience.
*
- * As an example, the method LevenbergMarquardt.minimizeNumericalDiff() is
+ * As an example, the method LevenbergMarquardt::minimize() is
* implemented as follow :
* \code
- * LevenbergMarquardt.minimizeNumericalDiff(Matrix< Scalar, Dynamic, 1 > &x,
- * const int mode )
+ * Status LevenbergMarquardt<FunctorType,Scalar>::minimize(FVectorType &x, const int mode)
* {
- * Status status = minimizeNumericalDiffInit(x, mode);
- * while (status==Running)
- * status = minimizeNumericalDiffOneStep(x, mode);
+ * Status status = minimizeInit(x, mode);
+ * do {
+ * status = minimizeOneStep(x, mode);
+ * } while (status==Running);
* return status;
* }
* \endcode
*
+ * \section examples Examples
+ *
+ * The easiest way to understand how to use this module is by looking at the many examples in the file
+ * unsupported/test/NonLinearOptimization.cpp.
*/
#ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -129,9 +135,7 @@ namespace Eigen {
#include "src/NonLinearOptimization/r1updt.h"
#include "src/NonLinearOptimization/r1mpyq.h"
#include "src/NonLinearOptimization/rwupdt.h"
-#include "src/NonLinearOptimization/qrfac.h"
#include "src/NonLinearOptimization/fdjac1.h"
-#include "src/NonLinearOptimization/qform.h"
#include "src/NonLinearOptimization/lmpar.h"
#include "src/NonLinearOptimization/dogleg.h"
#include "src/NonLinearOptimization/covar.h"
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
index fd1938a87..39c23cdc5 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2009, 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -29,74 +29,32 @@
template <typename Scalar> Scalar log2(Scalar v) { return std::log(v)/std::log(Scalar(2)); }
#endif
-/** \ingroup MatrixFunctions_Module
- *
- * \brief Compute the matrix exponential.
- *
- * \param[in] M matrix whose exponential is to be computed.
- * \param[out] result pointer to the matrix in which to store the result.
- *
- * The matrix exponential of \f$ M \f$ is defined by
- * \f[ \exp(M) = \sum_{k=0}^\infty \frac{M^k}{k!}. \f]
- * The matrix exponential can be used to solve linear ordinary
- * differential equations: the solution of \f$ y' = My \f$ with the
- * initial condition \f$ y(0) = y_0 \f$ is given by
- * \f$ y(t) = \exp(M) y_0 \f$.
- *
- * The cost of the computation is approximately \f$ 20 n^3 \f$ for
- * matrices of size \f$ n \f$. The number 20 depends weakly on the
- * norm of the matrix.
- *
- * The matrix exponential is computed using the scaling-and-squaring
- * method combined with Pad&eacute; approximation. The matrix is first
- * rescaled, then the exponential of the reduced matrix is computed
- * approximant, and then the rescaling is undone by repeated
- * squaring. The degree of the Pad&eacute; approximant is chosen such
- * that the approximation error is less than the round-off
- * error. However, errors may accumulate during the squaring phase.
- *
- * Details of the algorithm can be found in: Nicholas J. Higham, "The
- * scaling and squaring method for the matrix exponential revisited,"
- * <em>SIAM J. %Matrix Anal. Applic.</em>, <b>26</b>:1179&ndash;1193,
- * 2005.
- *
- * Example: The following program checks that
- * \f[ \exp \left[ \begin{array}{ccc}
- * 0 & \frac14\pi & 0 \\
- * -\frac14\pi & 0 & 0 \\
- * 0 & 0 & 0
- * \end{array} \right] = \left[ \begin{array}{ccc}
- * \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
- * \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
- * 0 & 0 & 1
- * \end{array} \right]. \f]
- * This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
- * the z-axis.
- *
- * \include MatrixExponential.cpp
- * Output: \verbinclude MatrixExponential.out
- *
- * \note \p M has to be a matrix of \c float, \c double,
- * \c complex<float> or \c complex<double> .
- */
-template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_exponential(const MatrixBase<Derived> &M,
- typename MatrixBase<Derived>::PlainMatrixType* result);
/** \ingroup MatrixFunctions_Module
* \brief Class for computing the matrix exponential.
+ * \tparam MatrixType type of the argument of the exponential,
+ * expected to be an instantiation of the Matrix class template.
*/
template <typename MatrixType>
class MatrixExponential {
public:
- /** \brief Compute the matrix exponential.
- *
- * \param M matrix whose exponential is to be computed.
- * \param result pointer to the matrix in which to store the result.
- */
- MatrixExponential(const MatrixType &M, MatrixType *result);
+ /** \brief Constructor.
+ *
+ * The class stores a reference to \p M, so it should not be
+ * changed (or destroyed) before compute() is called.
+ *
+ * \param[in] M matrix whose exponential is to be computed.
+ */
+ MatrixExponential(const MatrixType &M);
+
+ /** \brief Computes the matrix exponential.
+ *
+ * \param[out] result the matrix exponential of \p M in the constructor.
+ */
+ template <typename ResultType>
+ void compute(ResultType &result);
private:
@@ -109,7 +67,7 @@ class MatrixExponential {
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*
- * \param A Argument of matrix exponential
+ * \param[in] A Argument of matrix exponential
*/
void pade3(const MatrixType &A);
@@ -118,7 +76,7 @@ class MatrixExponential {
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*
- * \param A Argument of matrix exponential
+ * \param[in] A Argument of matrix exponential
*/
void pade5(const MatrixType &A);
@@ -127,7 +85,7 @@ class MatrixExponential {
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*
- * \param A Argument of matrix exponential
+ * \param[in] A Argument of matrix exponential
*/
void pade7(const MatrixType &A);
@@ -136,7 +94,7 @@ class MatrixExponential {
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*
- * \param A Argument of matrix exponential
+ * \param[in] A Argument of matrix exponential
*/
void pade9(const MatrixType &A);
@@ -145,7 +103,7 @@ class MatrixExponential {
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*
- * \param A Argument of matrix exponential
+ * \param[in] A Argument of matrix exponential
*/
void pade13(const MatrixType &A);
@@ -171,10 +129,10 @@ class MatrixExponential {
void computeUV(float);
typedef typename ei_traits<MatrixType>::Scalar Scalar;
- typedef typename NumTraits<typename ei_traits<MatrixType>::Scalar>::Real RealScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
- /** \brief Pointer to matrix whose exponential is to be computed. */
- const MatrixType* m_M;
+ /** \brief Reference to matrix whose exponential is to be computed. */
+ const MatrixType& m_M;
/** \brief Even-degree terms in numerator of Pad&eacute; approximant. */
MatrixType m_U;
@@ -199,8 +157,8 @@ class MatrixExponential {
};
template <typename MatrixType>
-MatrixExponential<MatrixType>::MatrixExponential(const MatrixType &M, MatrixType *result) :
- m_M(&M),
+MatrixExponential<MatrixType>::MatrixExponential(const MatrixType &M) :
+ m_M(M),
m_U(M.rows(),M.cols()),
m_V(M.rows(),M.cols()),
m_tmp1(M.rows(),M.cols()),
@@ -209,12 +167,19 @@ MatrixExponential<MatrixType>::MatrixExponential(const MatrixType &M, MatrixType
m_squarings(0),
m_l1norm(static_cast<float>(M.cwiseAbs().colwise().sum().maxCoeff()))
{
+ /* empty body */
+}
+
+template <typename MatrixType>
+template <typename ResultType>
+void MatrixExponential<MatrixType>::compute(ResultType &result)
+{
computeUV(RealScalar());
m_tmp1 = m_U + m_V; // numerator of Pade approximant
m_tmp2 = -m_U + m_V; // denominator of Pade approximant
- *result = m_tmp2.partialPivLu().solve(m_tmp1);
+ result = m_tmp2.partialPivLu().solve(m_tmp1);
for (int i=0; i<m_squarings; i++)
- *result *= *result; // undo scaling by repeated squaring
+ result *= result; // undo scaling by repeated squaring
}
template <typename MatrixType>
@@ -286,13 +251,13 @@ template <typename MatrixType>
void MatrixExponential<MatrixType>::computeUV(float)
{
if (m_l1norm < 4.258730016922831e-001) {
- pade3(*m_M);
+ pade3(m_M);
} else if (m_l1norm < 1.880152677804762e+000) {
- pade5(*m_M);
+ pade5(m_M);
} else {
const float maxnorm = 3.925724783138660f;
m_squarings = std::max(0, (int)ceil(log2(m_l1norm / maxnorm)));
- MatrixType A = *m_M / std::pow(Scalar(2), Scalar(static_cast<RealScalar>(m_squarings)));
+ MatrixType A = m_M / std::pow(Scalar(2), Scalar(static_cast<RealScalar>(m_squarings)));
pade7(A);
}
}
@@ -301,27 +266,126 @@ template <typename MatrixType>
void MatrixExponential<MatrixType>::computeUV(double)
{
if (m_l1norm < 1.495585217958292e-002) {
- pade3(*m_M);
+ pade3(m_M);
} else if (m_l1norm < 2.539398330063230e-001) {
- pade5(*m_M);
+ pade5(m_M);
} else if (m_l1norm < 9.504178996162932e-001) {
- pade7(*m_M);
+ pade7(m_M);
} else if (m_l1norm < 2.097847961257068e+000) {
- pade9(*m_M);
+ pade9(m_M);
} else {
const double maxnorm = 5.371920351148152;
m_squarings = std::max(0, (int)ceil(log2(m_l1norm / maxnorm)));
- MatrixType A = *m_M / std::pow(Scalar(2), Scalar(m_squarings));
+ MatrixType A = m_M / std::pow(Scalar(2), Scalar(m_squarings));
pade13(A);
}
}
+/** \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix exponential of some matrix (expression).
+ *
+ * \tparam Derived Type of the argument to the matrix exponential.
+ *
+ * This class holds the argument to the matrix exponential until it
+ * is assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * ei_matrix_exponential() and most of the time this is the only way
+ * it is used.
+ */
+template<typename Derived> struct MatrixExponentialReturnValue
+: public ReturnByValue<MatrixExponentialReturnValue<Derived> >
+{
+ public:
+ /** \brief Constructor.
+ *
+ * \param[in] src %Matrix (expression) forming the argument of the
+ * matrix exponential.
+ */
+ MatrixExponentialReturnValue(const Derived& src) : m_src(src) { }
+
+ /** \brief Compute the matrix exponential.
+ *
+ * \param[out] result the matrix exponential of \p src in the
+ * constructor.
+ */
+ template <typename ResultType>
+ inline void evalTo(ResultType& result) const
+ {
+ const typename ei_eval<Derived>::type srcEvaluated = m_src.eval();
+ MatrixExponential<typename Derived::PlainMatrixType> me(srcEvaluated);
+ me.compute(result);
+ }
+
+ int rows() const { return m_src.rows(); }
+ int cols() const { return m_src.cols(); }
+
+ protected:
+ const Derived& m_src;
+};
+
+template<typename Derived>
+struct ei_traits<MatrixExponentialReturnValue<Derived> >
+{
+ typedef typename Derived::PlainMatrixType ReturnMatrixType;
+};
+
+/** \ingroup MatrixFunctions_Module
+ *
+ * \brief Compute the matrix exponential.
+ *
+ * \param[in] M matrix whose exponential is to be computed.
+ * \returns expression representing the matrix exponential of \p M.
+ *
+ * The matrix exponential of \f$ M \f$ is defined by
+ * \f[ \exp(M) = \sum_{k=0}^\infty \frac{M^k}{k!}. \f]
+ * The matrix exponential can be used to solve linear ordinary
+ * differential equations: the solution of \f$ y' = My \f$ with the
+ * initial condition \f$ y(0) = y_0 \f$ is given by
+ * \f$ y(t) = \exp(M) y_0 \f$.
+ *
+ * The cost of the computation is approximately \f$ 20 n^3 \f$ for
+ * matrices of size \f$ n \f$. The number 20 depends weakly on the
+ * norm of the matrix.
+ *
+ * The matrix exponential is computed using the scaling-and-squaring
+ * method combined with Pad&eacute; approximation. The matrix is first
+ * rescaled, then the exponential of the reduced matrix is computed with
+ * a Pad&eacute; approximant, and then the rescaling is undone by repeated
+ * squaring. The degree of the Pad&eacute; approximant is chosen such
+ * that the approximation error is less than the round-off
+ * error. However, errors may accumulate during the squaring phase.
+ *
+ * Details of the algorithm can be found in: Nicholas J. Higham, "The
+ * scaling and squaring method for the matrix exponential revisited,"
+ * <em>SIAM J. %Matrix Anal. Applic.</em>, <b>26</b>:1179&ndash;1193,
+ * 2005.
+ *
+ * Example: The following program checks that
+ * \f[ \exp \left[ \begin{array}{ccc}
+ * 0 & \frac14\pi & 0 \\
+ * -\frac14\pi & 0 & 0 \\
+ * 0 & 0 & 0
+ * \end{array} \right] = \left[ \begin{array}{ccc}
+ * \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
+ * \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
+ * 0 & 0 & 1
+ * \end{array} \right]. \f]
+ * This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
+ * the z-axis.
+ *
+ * \include MatrixExponential.cpp
+ * Output: \verbinclude MatrixExponential.out
+ *
+ * \note \p M has to be a matrix of \c float, \c double,
+ * \c complex<float> or \c complex<double> .
+ */
template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_exponential(const MatrixBase<Derived> &M,
- typename MatrixBase<Derived>::PlainMatrixType* result)
+MatrixExponentialReturnValue<Derived>
+ei_matrix_exponential(const MatrixBase<Derived> &M)
{
ei_assert(M.rows() == M.cols());
- MatrixExponential<typename MatrixBase<Derived>::PlainMatrixType>(M, result);
+ return MatrixExponentialReturnValue<Derived>(M.derived());
}
#endif // EIGEN_MATRIX_EXPONENTIAL
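
With this patch the exponential is no longer written through an output pointer: ei_matrix_exponential() returns a lightweight proxy (built on ReturnByValue) that evaluates on assignment, and MatrixExponential itself separates construction from compute(). A short sketch of both entry points after the change; the matrix values are illustrative:

#include <unsupported/Eigen/MatrixFunctions>
using namespace Eigen;

int main()
{
  Matrix2d A;
  A << 0, 1,
      -1, 0;

  // Expression-style interface: the proxy is evaluated when assigned.
  Matrix2d expA;
  expA = ei_matrix_exponential(A);

  // Lower-level interface: construct once, then compute into any result matrix.
  MatrixExponential<Matrix2d> me(A);
  Matrix2d expA2;
  me.compute(expA2);

  return expA.isApprox(expA2) ? 0 : 1;
}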
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
index fbc55507f..d63bcbce9 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
@@ -28,66 +28,11 @@
#include "StemFunction.h"
#include "MatrixFunctionAtomic.h"
-/** \ingroup MatrixFunctions_Module
- *
- * \brief Compute a matrix function.
- *
- * \param[in] M argument of matrix function, should be a square matrix.
- * \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x.
- * \param[out] result pointer to the matrix in which to store the result, \f$ f(M) \f$.
- *
- * This function computes \f$ f(A) \f$ and stores the result in the
- * matrix pointed to by \p result.
- *
- * Suppose that \p M is a matrix whose entries have type \c Scalar.
- * Then, the second argument, \p f, should be a function with prototype
- * \code
- * ComplexScalar f(ComplexScalar, int)
- * \endcode
- * where \c ComplexScalar = \c std::complex<Scalar> if \c Scalar is
- * real (e.g., \c float or \c double) and \c ComplexScalar =
- * \c Scalar if \c Scalar is complex. The return value of \c f(x,n)
- * should be \f$ f^{(n)}(x) \f$, the n-th derivative of f at x.
- *
- * This routine uses the algorithm described in:
- * Philip Davies and Nicholas J. Higham,
- * "A Schur-Parlett algorithm for computing matrix functions",
- * <em>SIAM J. %Matrix Anal. Applic.</em>, <b>25</b>:464&ndash;485, 2003.
- *
- * The actual work is done by the MatrixFunction class.
- *
- * Example: The following program checks that
- * \f[ \exp \left[ \begin{array}{ccc}
- * 0 & \frac14\pi & 0 \\
- * -\frac14\pi & 0 & 0 \\
- * 0 & 0 & 0
- * \end{array} \right] = \left[ \begin{array}{ccc}
- * \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
- * \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
- * 0 & 0 & 1
- * \end{array} \right]. \f]
- * This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
- * the z-axis. This is the same example as used in the documentation
- * of ei_matrix_exponential().
- *
- * \include MatrixFunction.cpp
- * Output: \verbinclude MatrixFunction.out
- *
- * Note that the function \c expfn is defined for complex numbers
- * \c x, even though the matrix \c A is over the reals. Instead of
- * \c expfn, we could also have used StdStemFunctions::exp:
- * \code
- * ei_matrix_function(A, StdStemFunctions<std::complex<double> >::exp, &B);
- * \endcode
- */
-template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_function(const MatrixBase<Derived>& M,
- typename ei_stem_function<typename ei_traits<Derived>::Scalar>::type f,
- typename MatrixBase<Derived>::PlainMatrixType* result);
-
-/** \ingroup MatrixFunctions_Module
- * \brief Helper class for computing matrix functions.
+/** \ingroup MatrixFunctions_Module
+ * \brief Class for computing matrix functions.
+ * \tparam MatrixType type of the argument of the matrix function,
+ * expected to be an instantiation of the Matrix class template.
*/
template <typename MatrixType, int IsComplex = NumTraits<typename ei_traits<MatrixType>::Scalar>::IsComplex>
class MatrixFunction
@@ -99,18 +44,26 @@ class MatrixFunction
public:
- /** \brief Constructor. Computes matrix function.
+ /** \brief Constructor.
*
* \param[in] A argument of matrix function, should be a square matrix.
* \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x.
- * \param[out] result pointer to the matrix in which to store the result, \f$ f(A) \f$.
*
- * This function computes \f$ f(A) \f$ and stores the result in
- * the matrix pointed to by \p result.
+ * The class stores a reference to \p A, so it should not be
+ * changed (or destroyed) before compute() is called.
+ */
+ MatrixFunction(const MatrixType& A, StemFunction f);
+
+ /** \brief Compute the matrix function.
+ *
+ * \param[out] result the function \p f applied to \p A, as
+ * specified in the constructor.
*
- * See ei_matrix_function() for details.
+ * See ei_matrix_function() for details on how this computation
+ * is implemented.
*/
- MatrixFunction(const MatrixType& A, StemFunction f, MatrixType* result);
+ template <typename ResultType>
+ void compute(ResultType &result);
};
@@ -136,23 +89,36 @@ class MatrixFunction<MatrixType, 0>
public:
- /** \brief Constructor. Computes matrix function.
+ /** \brief Constructor.
*
* \param[in] A argument of matrix function, should be a square matrix.
* \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x.
- * \param[out] result pointer to the matrix in which to store the result, \f$ f(A) \f$.
+ */
+ MatrixFunction(const MatrixType& A, StemFunction f) : m_A(A), m_f(f) { }
+
+ /** \brief Compute the matrix function.
+ *
+ * \param[out] result the function \p f applied to \p A, as
+ * specified in the constructor.
*
* This function converts the real matrix \c A to a complex matrix,
* uses MatrixFunction<MatrixType,1> and then converts the result back to
* a real matrix.
*/
- MatrixFunction(const MatrixType& A, StemFunction f, MatrixType* result)
+ template <typename ResultType>
+ void compute(ResultType& result)
{
- ComplexMatrix CA = A.template cast<ComplexScalar>();
+ ComplexMatrix CA = m_A.template cast<ComplexScalar>();
ComplexMatrix Cresult;
- MatrixFunction<ComplexMatrix>(CA, f, &Cresult);
- *result = Cresult.real();
+ MatrixFunction<ComplexMatrix> mf(CA, m_f);
+ mf.compute(Cresult);
+ result = Cresult.real();
}
+
+ private:
+
+ const MatrixType& m_A; /**< \brief Reference to argument of matrix function. */
+ StemFunction *m_f; /**< \brief Stem function for matrix function under consideration */
};
@@ -179,17 +145,12 @@ class MatrixFunction<MatrixType, 1>
public:
- /** \brief Constructor. Computes matrix function.
- *
- * \param[in] A argument of matrix function, should be a square matrix.
- * \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x.
- * \param[out] result pointer to the matrix in which to store the result, \f$ f(A) \f$.
- */
- MatrixFunction(const MatrixType& A, StemFunction f, MatrixType* result);
+ MatrixFunction(const MatrixType& A, StemFunction f);
+ template <typename ResultType> void compute(ResultType& result);
private:
- void computeSchurDecomposition(const MatrixType& A);
+ void computeSchurDecomposition();
void partitionEigenvalues();
typename ListOfClusters::iterator findCluster(Scalar key);
void computeClusterSize();
@@ -202,6 +163,7 @@ class MatrixFunction<MatrixType, 1>
void computeOffDiagonal();
DynMatrixType solveTriangularSylvester(const DynMatrixType& A, const DynMatrixType& B, const DynMatrixType& C);
+ const MatrixType& m_A; /**< \brief Reference to argument of matrix function. */
StemFunction *m_f; /**< \brief Stem function for matrix function under consideration */
MatrixType m_T; /**< \brief Triangular part of Schur decomposition */
MatrixType m_U; /**< \brief Unitary part of Schur decomposition */
@@ -221,11 +183,28 @@ class MatrixFunction<MatrixType, 1>
static const RealScalar separation() { return static_cast<RealScalar>(0.01); }
};
+/** \brief Constructor.
+ *
+ * \param[in] A argument of matrix function, should be a square matrix.
+ * \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x.
+ */
template <typename MatrixType>
-MatrixFunction<MatrixType,1>::MatrixFunction(const MatrixType& A, StemFunction f, MatrixType* result) :
- m_f(f)
+MatrixFunction<MatrixType,1>::MatrixFunction(const MatrixType& A, StemFunction f) :
+ m_A(A), m_f(f)
{
- computeSchurDecomposition(A);
+ /* empty body */
+}
+
+/** \brief Compute the matrix function.
+ *
+ * \param[out] result the function \p f applied to \p A, as
+ * specified in the constructor.
+ */
+template <typename MatrixType>
+template <typename ResultType>
+void MatrixFunction<MatrixType,1>::compute(ResultType& result)
+{
+ computeSchurDecomposition();
partitionEigenvalues();
computeClusterSize();
computeBlockStart();
@@ -233,14 +212,14 @@ MatrixFunction<MatrixType,1>::MatrixFunction(const MatrixType& A, StemFunction f
permuteSchur();
computeBlockAtomic();
computeOffDiagonal();
- *result = m_U * m_fT * m_U.adjoint();
+ result = m_U * m_fT * m_U.adjoint();
}
-/** \brief Store the Schur decomposition of \p A in #m_T and #m_U */
+/** \brief Store the Schur decomposition of #m_A in #m_T and #m_U */
template <typename MatrixType>
-void MatrixFunction<MatrixType,1>::computeSchurDecomposition(const MatrixType& A)
+void MatrixFunction<MatrixType,1>::computeSchurDecomposition()
{
- const ComplexSchur<MatrixType> schurOfA(A);
+ const ComplexSchur<MatrixType> schurOfA(m_A);
m_T = schurOfA.matrixT();
m_U = schurOfA.matrixU();
}
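
The rewritten computeSchurDecomposition() just caches the two factors of the Schur form of m_A; the rest of the algorithm works on the triangular factor and maps the result back with the unitary factor. A small sketch of the decomposition it relies on:

#include <Eigen/Eigenvalues>
#include <iostream>

int main()
{
  Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(4, 4);
  Eigen::ComplexSchur<Eigen::MatrixXcd> schurOfA(A);
  Eigen::MatrixXcd T = schurOfA.matrixT();   // upper triangular
  Eigen::MatrixXcd U = schurOfA.matrixU();   // unitary
  // A is recovered as U * T * U^*; a matrix function f(A) is assembled as U * f(T) * U^*.
  std::cout << (U * T * U.adjoint() - A).norm() << std::endl;
  return 0;
}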
@@ -498,23 +477,129 @@ typename MatrixFunction<MatrixType,1>::DynMatrixType MatrixFunction<MatrixType,1
return X;
}
+/** \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix function of some matrix (expression).
+ *
+ * \tparam Derived Type of the argument to the matrix function.
+ *
+ * This class holds the argument to the matrix function until it is
+ * assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * ei_matrix_function() and related functions and most of the time
+ * this is the only way it is used.
+ */
+template<typename Derived> class MatrixFunctionReturnValue
+: public ReturnByValue<MatrixFunctionReturnValue<Derived> >
+{
+ private:
+
+ typedef typename ei_traits<Derived>::Scalar Scalar;
+ typedef typename ei_stem_function<Scalar>::type StemFunction;
+
+ public:
+
+ /** \brief Constructor.
+ *
+ * \param[in] A %Matrix (expression) forming the argument of the
+ * matrix function.
+ * \param[in] f Stem function for matrix function under consideration.
+ */
+ MatrixFunctionReturnValue(const Derived& A, StemFunction f) : m_A(A), m_f(f) { }
+
+ /** \brief Compute the matrix function.
+ *
+ * \param[out] result \p f applied to \p A, where \p f and \p A
+ * are as in the constructor.
+ */
+ template <typename ResultType>
+ inline void evalTo(ResultType& result) const
+ {
+ const typename ei_eval<Derived>::type Aevaluated = m_A.eval();
+ MatrixFunction<typename Derived::PlainMatrixType> mf(Aevaluated, m_f);
+ mf.compute(result);
+ }
+
+ int rows() const { return m_A.rows(); }
+ int cols() const { return m_A.cols(); }
+ private:
+ const Derived& m_A;
+ StemFunction *m_f;
+};
+
+template<typename Derived>
+struct ei_traits<MatrixFunctionReturnValue<Derived> >
+{
+ typedef typename Derived::PlainMatrixType ReturnMatrixType;
+};
+
+
+/** \ingroup MatrixFunctions_Module
+ *
+ * \brief Compute a matrix function.
+ *
+ * \param[in] M argument of matrix function, should be a square matrix.
+ * \param[in] f an entire function; \c f(x,n) should compute the n-th
+ * derivative of f at x.
+ * \returns expression representing \p f applied to \p M.
+ *
+ * Suppose that \p M is a matrix whose entries have type \c Scalar.
+ * Then, the second argument, \p f, should be a function with prototype
+ * \code
+ * ComplexScalar f(ComplexScalar, int)
+ * \endcode
+ * where \c ComplexScalar = \c std::complex<Scalar> if \c Scalar is
+ * real (e.g., \c float or \c double) and \c ComplexScalar =
+ * \c Scalar if \c Scalar is complex. The return value of \c f(x,n)
+ * should be \f$ f^{(n)}(x) \f$, the n-th derivative of f at x.
+ *
+ * This routine uses the algorithm described in:
+ * Philip Davies and Nicholas J. Higham,
+ * "A Schur-Parlett algorithm for computing matrix functions",
+ * <em>SIAM J. %Matrix Anal. Applic.</em>, <b>25</b>:464&ndash;485, 2003.
+ *
+ * The actual work is done by the MatrixFunction class.
+ *
+ * Example: The following program checks that
+ * \f[ \exp \left[ \begin{array}{ccc}
+ * 0 & \frac14\pi & 0 \\
+ * -\frac14\pi & 0 & 0 \\
+ * 0 & 0 & 0
+ * \end{array} \right] = \left[ \begin{array}{ccc}
+ * \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
+ * \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
+ * 0 & 0 & 1
+ * \end{array} \right]. \f]
+ * This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
+ * the z-axis. This is the same example as used in the documentation
+ * of ei_matrix_exponential().
+ *
+ * \include MatrixFunction.cpp
+ * Output: \verbinclude MatrixFunction.out
+ *
+ * Note that the function \c expfn is defined for complex numbers
+ * \c x, even though the matrix \c A is over the reals. Instead of
+ * \c expfn, we could also have used StdStemFunctions::exp:
+ * \code
+ * B = ei_matrix_function(A, StdStemFunctions<std::complex<double> >::exp);
+ * \endcode
+ */
template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_function(const MatrixBase<Derived>& M,
- typename ei_stem_function<typename ei_traits<Derived>::Scalar>::type f,
- typename MatrixBase<Derived>::PlainMatrixType* result)
+MatrixFunctionReturnValue<Derived>
+ei_matrix_function(const MatrixBase<Derived> &M,
+ typename ei_stem_function<typename ei_traits<Derived>::Scalar>::type f)
{
ei_assert(M.rows() == M.cols());
- typedef typename MatrixBase<Derived>::PlainMatrixType PlainMatrixType;
- MatrixFunction<PlainMatrixType>(M, f, result);
+ return MatrixFunctionReturnValue<Derived>(M.derived(), f);
}
/** \ingroup MatrixFunctions_Module
*
* \brief Compute the matrix sine.
*
- * \param[in] M a square matrix.
- * \param[out] result pointer to matrix in which to store the result, \f$ \sin(M) \f$
+ * \param[in] M a square matrix.
+ * \returns expression representing \f$ \sin(M) \f$.
*
* This function calls ei_matrix_function() with StdStemFunctions::sin().
*
@@ -522,44 +607,42 @@ EIGEN_STRONG_INLINE void ei_matrix_function(const MatrixBase<Derived>& M,
* Output: \verbinclude MatrixSine.out
*/
template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_sin(const MatrixBase<Derived>& M,
- typename MatrixBase<Derived>::PlainMatrixType* result)
+MatrixFunctionReturnValue<Derived>
+ei_matrix_sin(const MatrixBase<Derived>& M)
{
ei_assert(M.rows() == M.cols());
- typedef typename MatrixBase<Derived>::PlainMatrixType PlainMatrixType;
- typedef typename ei_traits<PlainMatrixType>::Scalar Scalar;
+ typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_stem_function<Scalar>::ComplexScalar ComplexScalar;
- MatrixFunction<PlainMatrixType>(M, StdStemFunctions<ComplexScalar>::sin, result);
+ return MatrixFunctionReturnValue<Derived>(M.derived(), StdStemFunctions<ComplexScalar>::sin);
}
/** \ingroup MatrixFunctions_Module
*
* \brief Compute the matrix cosine.
*
- * \param[in] M a square matrix.
- * \param[out] result pointer to matrix in which to store the result, \f$ \cos(M) \f$
+ * \param[in] M a square matrix.
+ * \returns expression representing \f$ \cos(M) \f$.
*
* This function calls ei_matrix_function() with StdStemFunctions::cos().
*
* \sa ei_matrix_sin() for an example.
*/
template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_cos(const MatrixBase<Derived>& M,
- typename MatrixBase<Derived>::PlainMatrixType* result)
+MatrixFunctionReturnValue<Derived>
+ei_matrix_cos(const MatrixBase<Derived>& M)
{
ei_assert(M.rows() == M.cols());
- typedef typename MatrixBase<Derived>::PlainMatrixType PlainMatrixType;
- typedef typename ei_traits<PlainMatrixType>::Scalar Scalar;
+ typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_stem_function<Scalar>::ComplexScalar ComplexScalar;
- MatrixFunction<PlainMatrixType>(M, StdStemFunctions<ComplexScalar>::cos, result);
+ return MatrixFunctionReturnValue<Derived>(M.derived(), StdStemFunctions<ComplexScalar>::cos);
}
/** \ingroup MatrixFunctions_Module
*
* \brief Compute the matrix hyperbolic sine.
*
- * \param[in] M a square matrix.
- * \param[out] result pointer to matrix in which to store the result, \f$ \sinh(M) \f$
+ * \param[in] M a square matrix.
+ * \returns expression representing \f$ \sinh(M) \f$
*
* This function calls ei_matrix_function() with StdStemFunctions::sinh().
*
@@ -567,36 +650,34 @@ EIGEN_STRONG_INLINE void ei_matrix_cos(const MatrixBase<Derived>& M,
* Output: \verbinclude MatrixSinh.out
*/
template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_sinh(const MatrixBase<Derived>& M,
- typename MatrixBase<Derived>::PlainMatrixType* result)
+MatrixFunctionReturnValue<Derived>
+ei_matrix_sinh(const MatrixBase<Derived>& M)
{
ei_assert(M.rows() == M.cols());
- typedef typename MatrixBase<Derived>::PlainMatrixType PlainMatrixType;
- typedef typename ei_traits<PlainMatrixType>::Scalar Scalar;
+ typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_stem_function<Scalar>::ComplexScalar ComplexScalar;
- MatrixFunction<PlainMatrixType>(M, StdStemFunctions<ComplexScalar>::sinh, result);
+ return MatrixFunctionReturnValue<Derived>(M.derived(), StdStemFunctions<ComplexScalar>::sinh);
}
/** \ingroup MatrixFunctions_Module
*
- * \brief Compute the matrix hyberpolic cosine.
+ * \brief Compute the matrix hyperbolic cosine.
*
- * \param[in] M a square matrix.
- * \param[out] result pointer to matrix in which to store the result, \f$ \cosh(M) \f$
+ * \param[in] M a square matrix.
+ * \returns expression representing \f$ \cosh(M) \f$
*
* This function calls ei_matrix_function() with StdStemFunctions::cosh().
*
* \sa ei_matrix_sinh() for an example.
*/
template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_cosh(const MatrixBase<Derived>& M,
- typename MatrixBase<Derived>::PlainMatrixType* result)
+MatrixFunctionReturnValue<Derived>
+ei_matrix_cosh(const MatrixBase<Derived>& M)
{
ei_assert(M.rows() == M.cols());
- typedef typename MatrixBase<Derived>::PlainMatrixType PlainMatrixType;
- typedef typename ei_traits<PlainMatrixType>::Scalar Scalar;
+ typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_stem_function<Scalar>::ComplexScalar ComplexScalar;
- MatrixFunction<PlainMatrixType>(M, StdStemFunctions<ComplexScalar>::cosh, result);
+ return MatrixFunctionReturnValue<Derived>(M.derived(), StdStemFunctions<ComplexScalar>::cosh);
}
#endif // EIGEN_MATRIX_FUNCTION
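
As with the exponential, the trigonometric and general matrix-function helpers now return proxies instead of writing through a pointer. A hedged sketch of the new call forms; the StdStemFunctions spelling is the one used in the documentation above:

#include <unsupported/Eigen/MatrixFunctions>
#include <complex>
using namespace Eigen;

int main()
{
  MatrixXd A = MatrixXd::Random(3, 3);

  MatrixXd sinA, cosA;
  sinA = ei_matrix_sin(A);   // proxies, evaluated on assignment
  cosA = ei_matrix_cos(A);

  // General form: pass an entire function f(x, n) returning the n-th derivative of f at x.
  MatrixXd expA;
  expA = ei_matrix_function(A, StdStemFunctions<std::complex<double> >::exp);

  // sin^2(A) + cos^2(A) = I should hold up to round-off.
  return (sinA * sinA + cosA * cosA).isApprox(MatrixXd::Identity(3, 3)) ? 0 : 1;
}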
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
index d7409371b..4bcae47c0 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
@@ -85,7 +85,7 @@ MatrixType MatrixFunctionAtomic<MatrixType>::compute(const MatrixType& A)
{
// TODO: Use that A is upper triangular
m_Arows = A.rows();
- m_avgEival = A.trace() / Scalar(m_Arows);
+ m_avgEival = A.trace() / Scalar(RealScalar(m_Arows));
m_Ashifted = A - m_avgEival * MatrixType::Identity(m_Arows, m_Arows);
computeMu();
MatrixType F = m_f(m_avgEival, 0) * MatrixType::Identity(m_Arows, m_Arows);
@@ -94,7 +94,7 @@ MatrixType MatrixFunctionAtomic<MatrixType>::compute(const MatrixType& A)
for (int s = 1; s < 1.1 * m_Arows + 10; s++) { // upper limit is fairly arbitrary
Fincr = m_f(m_avgEival, s) * P;
F += Fincr;
- P = (1/(s + 1.0)) * P * m_Ashifted;
+ P = Scalar(RealScalar(1.0/(s + 1))) * P * m_Ashifted;
if (taylorConverged(s, F, Fincr, P)) {
return F;
}
@@ -121,19 +121,19 @@ bool MatrixFunctionAtomic<MatrixType>::taylorConverged(int s, const MatrixType&
const int n = F.rows();
const RealScalar F_norm = F.cwiseAbs().rowwise().sum().maxCoeff();
const RealScalar Fincr_norm = Fincr.cwiseAbs().rowwise().sum().maxCoeff();
- if (Fincr_norm < epsilon<Scalar>() * F_norm) {
+ if (Fincr_norm < NumTraits<Scalar>::epsilon() * F_norm) {
RealScalar delta = 0;
RealScalar rfactorial = 1;
for (int r = 0; r < n; r++) {
RealScalar mx = 0;
for (int i = 0; i < n; i++)
- mx = std::max(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, s+r)));
- if (r != 0)
- rfactorial *= r;
+ mx = std::max(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, s+r)));
+ if (r != 0)
+ rfactorial *= RealScalar(r);
delta = std::max(delta, mx / rfactorial);
}
const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff();
- if (m_mu * delta * P_norm < epsilon<Scalar>() * F_norm)
+ if (m_mu * delta * P_norm < NumTraits<Scalar>::epsilon() * F_norm)
return true;
}
return false;
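
For reference, the loop in compute() above implements the Taylor expansion of f around the mean eigenvalue, the shift realized by m_avgEival and m_Ashifted:
\f[ f(A) \;=\; \sum_{s=0}^{\infty} \frac{f^{(s)}(\mu)}{s!}\,(A-\mu I)^s,
    \qquad \mu = \frac{\mathrm{trace}(A)}{n}, \f]
where the running term \f$ P_s = (A-\mu I)^s/s! \f$ is updated as \f$ P_{s+1} = P_s\,(A-\mu I)/(s+1) \f$, which is exactly the rewritten P update line in the hunk above.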
diff --git a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
index 3bf3d12ea..0754a426b 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
@@ -28,6 +28,19 @@
#ifndef EIGEN_HYBRIDNONLINEARSOLVER_H
#define EIGEN_HYBRIDNONLINEARSOLVER_H
+namespace HybridNonLinearSolverSpace {
+ enum Status {
+ Running = -1,
+ ImproperInputParameters = 0,
+ RelativeErrorTooSmall = 1,
+ TooManyFunctionEvaluation = 2,
+ TolTooSmall = 3,
+ NotMakingProgressJacobian = 4,
+ NotMakingProgressIterations = 5,
+ UserAksed = 6
+ };
+}
+
/**
* \ingroup NonLinearOptimization_Module
* \brief Finds a zero of a system of n
@@ -46,22 +59,11 @@ public:
HybridNonLinearSolver(FunctorType &_functor)
: functor(_functor) { nfev=njev=iter = 0; fnorm= 0.; }
- enum Status {
- Running = -1,
- ImproperInputParameters = 0,
- RelativeErrorTooSmall = 1,
- TooManyFunctionEvaluation = 2,
- TolTooSmall = 3,
- NotMakingProgressJacobian = 4,
- NotMakingProgressIterations = 5,
- UserAksed = 6
- };
-
struct Parameters {
Parameters()
: factor(Scalar(100.))
, maxfev(1000)
- , xtol(ei_sqrt(epsilon<Scalar>()))
+ , xtol(ei_sqrt(NumTraits<Scalar>::epsilon()))
, nb_of_subdiagonals(-1)
, nb_of_superdiagonals(-1)
, epsfcn(Scalar(0.)) {}
@@ -74,47 +76,50 @@ public:
};
typedef Matrix< Scalar, Dynamic, 1 > FVectorType;
typedef Matrix< Scalar, Dynamic, Dynamic > JacobianType;
+ /* TODO: if eigen provides a triangular storage, use it here */
+ typedef Matrix< Scalar, Dynamic, Dynamic > UpperTriangularType;
- Status hybrj1(
+ HybridNonLinearSolverSpace::Status hybrj1(
FVectorType &x,
- const Scalar tol = ei_sqrt(epsilon<Scalar>())
+ const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);
- Status solveInit(
+ HybridNonLinearSolverSpace::Status solveInit(
FVectorType &x,
const int mode=1
);
- Status solveOneStep(
+ HybridNonLinearSolverSpace::Status solveOneStep(
FVectorType &x,
const int mode=1
);
- Status solve(
+ HybridNonLinearSolverSpace::Status solve(
FVectorType &x,
const int mode=1
);
- Status hybrd1(
+ HybridNonLinearSolverSpace::Status hybrd1(
FVectorType &x,
- const Scalar tol = ei_sqrt(epsilon<Scalar>())
+ const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);
- Status solveNumericalDiffInit(
+ HybridNonLinearSolverSpace::Status solveNumericalDiffInit(
FVectorType &x,
const int mode=1
);
- Status solveNumericalDiffOneStep(
+ HybridNonLinearSolverSpace::Status solveNumericalDiffOneStep(
FVectorType &x,
const int mode=1
);
- Status solveNumericalDiff(
+ HybridNonLinearSolverSpace::Status solveNumericalDiff(
FVectorType &x,
const int mode=1
);
void resetParameters(void) { parameters = Parameters(); }
Parameters parameters;
- FVectorType fvec, R, qtf, diag;
+ FVectorType fvec, qtf, diag;
JacobianType fjac;
+ UpperTriangularType R;
int nfev;
int njev;
int iter;
@@ -139,7 +144,7 @@ private:
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::hybrj1(
FVectorType &x,
const Scalar tol
@@ -149,7 +154,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::hybrj1(
/* check the input parameters for errors. */
if (n <= 0 || tol < 0.)
- return ImproperInputParameters;
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
resetParameters();
parameters.maxfev = 100*(n+1);
@@ -162,7 +167,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::hybrj1(
}
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveInit(
FVectorType &x,
const int mode
@@ -173,7 +178,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveInit(
wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(n);
fvec.resize(n);
qtf.resize(n);
- R.resize( (n*(n+1))/2);
fjac.resize(n, n);
if (mode != 2)
diag.resize(n);
@@ -184,47 +188,45 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveInit(
njev = 0;
/* check the input parameters for errors. */
-
if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0. )
- return ImproperInputParameters;
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
if (mode == 2)
for (int j = 0; j < n; ++j)
if (diag[j] <= 0.)
- return ImproperInputParameters;
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
/* evaluate the function at the starting point */
/* and calculate its norm. */
-
nfev = 1;
if ( functor(x, fvec) < 0)
- return UserAksed;
+ return HybridNonLinearSolverSpace::UserAksed;
fnorm = fvec.stableNorm();
/* initialize iteration counter and monitors. */
-
iter = 1;
ncsuc = 0;
ncfail = 0;
nslow1 = 0;
nslow2 = 0;
- return Running;
+ return HybridNonLinearSolverSpace::Running;
}
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
FVectorType &x,
const int mode
)
{
- int i, j, l, iwa[1];
+ int j;
+ std::vector<PlanarRotation<Scalar> > v_givens(n), w_givens(n);
+
jeval = true;
/* calculate the jacobian matrix. */
-
if ( functor.df(x, fjac) < 0)
- return UserAksed;
+ return HybridNonLinearSolverSpace::UserAksed;
++njev;
wa2 = fjac.colwise().blueNorm();
@@ -233,118 +235,71 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (mode != 2)
- for (j = 0; j < n; ++j) {
- diag[j] = wa2[j];
- if (wa2[j] == 0.)
- diag[j] = 1.;
- }
+ for (j = 0; j < n; ++j)
+ diag[j] = (wa2[j]==0.) ? 1. : wa2[j];
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
- wa3 = diag.cwiseProduct(x);
- xnorm = wa3.stableNorm();
+ xnorm = diag.cwiseProduct(x).stableNorm();
delta = parameters.factor * xnorm;
if (delta == 0.)
delta = parameters.factor;
}
/* compute the qr factorization of the jacobian. */
- ei_qrfac<Scalar>(n, n, fjac.data(), fjac.rows(), false, iwa, wa1.data());
-
- /* form (q transpose)*fvec and store in qtf. */
-
- qtf = fvec;
- for (j = 0; j < n; ++j)
- if (fjac(j,j) != 0.) {
- sum = 0.;
- for (i = j; i < n; ++i)
- sum += fjac(i,j) * qtf[i];
- temp = -sum / fjac(j,j);
- for (i = j; i < n; ++i)
- qtf[i] += fjac(i,j) * temp;
- }
+ wa2 = fjac.colwise().blueNorm();
+ HouseholderQR<JacobianType> qrfac(fjac); // no pivoting:
/* copy the triangular factor of the qr factorization into r. */
-
- sing = false;
- for (j = 0; j < n; ++j) {
- l = j;
- for (i = 0; i < j; ++i) {
- R[l] = fjac(i,j);
- l = l + n - i -1;
- }
- R[l] = wa1[j];
- if (wa1[j] == 0.)
- sing = true;
- }
+ R = qrfac.matrixQR();
/* accumulate the orthogonal factor in fjac. */
- ei_qform<Scalar>(n, n, fjac.data(), fjac.rows(), wa1.data());
+ fjac = qrfac.householderQ();
- /* rescale if necessary. */
+ /* form (q transpose)*fvec and store in qtf. */
+ qtf = fjac.transpose() * fvec;
- /* Computing MAX */
+ /* rescale if necessary. */
if (mode != 2)
diag = diag.cwiseMax(wa2);
- /* beginning of the inner loop. */
-
while (true) {
-
/* determine the direction p. */
-
ei_dogleg<Scalar>(R, diag, qtf, delta, wa1);
/* store the direction p and x + p. calculate the norm of p. */
-
wa1 = -wa1;
wa2 = x + wa1;
- wa3 = diag.cwiseProduct(wa1);
- pnorm = wa3.stableNorm();
+ pnorm = diag.cwiseProduct(wa1).stableNorm();
/* on the first iteration, adjust the initial step bound. */
-
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
-
if ( functor(wa2, wa4) < 0)
- return UserAksed;
+ return HybridNonLinearSolverSpace::UserAksed;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the scaled actual reduction. */
-
actred = -1.;
if (fnorm1 < fnorm) /* Computing 2nd power */
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction. */
-
- l = 0;
- for (i = 0; i < n; ++i) {
- sum = 0.;
- for (j = i; j < n; ++j) {
- sum += R[l] * wa1[j];
- ++l;
- }
- wa3[i] = qtf[i] + sum;
- }
+ wa3 = R.template triangularView<Upper>()*wa1 + qtf;
temp = wa3.stableNorm();
prered = 0.;
if (temp < fnorm) /* Computing 2nd power */
prered = 1. - ei_abs2(temp / fnorm);
- /* compute the ratio of the actual to the predicted */
- /* reduction. */
-
+ /* compute the ratio of the actual to the predicted reduction. */
ratio = 0.;
if (prered > 0.)
ratio = actred / prered;
/* update the step bound. */
-
if (ratio < Scalar(.1)) {
ncsuc = 0;
++ncfail;
@@ -352,7 +307,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
} else {
ncfail = 0;
++ncsuc;
- if (ratio >= Scalar(.5) || ncsuc > 1) /* Computing MAX */
+ if (ratio >= Scalar(.5) || ncsuc > 1)
delta = std::max(delta, pnorm / Scalar(.5));
if (ei_abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
@@ -360,7 +315,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
}
/* test for successful iteration. */
-
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
@@ -372,7 +326,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
}
/* determine the progress of the iteration. */
-
++nslow1;
if (actred >= Scalar(.001))
nslow1 = 0;
@@ -382,62 +335,50 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
nslow2 = 0;
/* test for convergence. */
-
if (delta <= parameters.xtol * xnorm || fnorm == 0.)
- return RelativeErrorTooSmall;
+ return HybridNonLinearSolverSpace::RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
-
if (nfev >= parameters.maxfev)
- return TooManyFunctionEvaluation;
- if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= epsilon<Scalar>() * xnorm)
- return TolTooSmall;
+ return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
+ if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
+ return HybridNonLinearSolverSpace::TolTooSmall;
if (nslow2 == 5)
- return NotMakingProgressJacobian;
+ return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
if (nslow1 == 10)
- return NotMakingProgressIterations;
+ return HybridNonLinearSolverSpace::NotMakingProgressIterations;
/* criterion for recalculating jacobian. */
-
if (ncfail == 2)
break; // leave inner loop and go for the next outer loop iteration
/* calculate the rank one modification to the jacobian */
/* and update qtf if necessary. */
-
- for (j = 0; j < n; ++j) {
- sum = wa4.dot(fjac.col(j));
- wa2[j] = (sum - wa3[j]) / pnorm;
- wa1[j] = diag[j] * (diag[j] * wa1[j] / pnorm);
- if (ratio >= Scalar(1e-4))
- qtf[j] = sum;
- }
+ wa1 = diag.cwiseProduct( diag.cwiseProduct(wa1)/pnorm );
+ wa2 = fjac.transpose() * wa4;
+ if (ratio >= Scalar(1e-4))
+ qtf = wa2;
+ wa2 = (wa2-wa3)/pnorm;
/* compute the qr factorization of the updated jacobian. */
-
- ei_r1updt<Scalar>(n, n, R.data(), R.size(), wa1.data(), wa2.data(), wa3.data(), &sing);
- ei_r1mpyq<Scalar>(n, n, fjac.data(), fjac.rows(), wa2.data(), wa3.data());
- ei_r1mpyq<Scalar>(1, n, qtf.data(), 1, wa2.data(), wa3.data());
-
- /* end of the inner loop. */
+ ei_r1updt<Scalar>(R, wa1, v_givens, w_givens, wa2, wa3, &sing);
+ ei_r1mpyq<Scalar>(n, n, fjac.data(), v_givens, w_givens);
+ ei_r1mpyq<Scalar>(1, n, qtf.data(), v_givens, w_givens);
jeval = false;
}
- /* end of the outer loop. */
-
- return Running;
+ return HybridNonLinearSolverSpace::Running;
}
-
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solve(
FVectorType &x,
const int mode
)
{
- Status status = solveInit(x, mode);
- while (status==Running)
+ HybridNonLinearSolverSpace::Status status = solveInit(x, mode);
+ while (status==HybridNonLinearSolverSpace::Running)
status = solveOneStep(x, mode);
return status;
}
@@ -445,7 +386,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solve(
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::hybrd1(
FVectorType &x,
const Scalar tol
@@ -455,7 +396,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::hybrd1(
/* check the input parameters for errors. */
if (n <= 0 || tol < 0.)
- return ImproperInputParameters;
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
resetParameters();
parameters.maxfev = 200*(n+1);
@@ -469,7 +410,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::hybrd1(
}
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(
FVectorType &x,
const int mode
@@ -482,7 +423,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(
wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(n);
qtf.resize(n);
- R.resize( (n*(n+1))/2);
fjac.resize(n, n);
fvec.resize(n);
if (mode != 2)
@@ -491,54 +431,51 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(
/* Function Body */
-
nfev = 0;
njev = 0;
/* check the input parameters for errors. */
-
if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.nb_of_subdiagonals< 0 || parameters.nb_of_superdiagonals< 0 || parameters.factor <= 0. )
- return ImproperInputParameters;
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
if (mode == 2)
for (int j = 0; j < n; ++j)
if (diag[j] <= 0.)
- return ImproperInputParameters;
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
/* evaluate the function at the starting point */
/* and calculate its norm. */
-
nfev = 1;
if ( functor(x, fvec) < 0)
- return UserAksed;
+ return HybridNonLinearSolverSpace::UserAksed;
fnorm = fvec.stableNorm();
/* initialize iteration counter and monitors. */
-
iter = 1;
ncsuc = 0;
ncfail = 0;
nslow1 = 0;
nslow2 = 0;
- return Running;
+ return HybridNonLinearSolverSpace::Running;
}
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
FVectorType &x,
const int mode
)
{
- int i, j, l, iwa[1];
+ int j;
+ std::vector<PlanarRotation<Scalar> > v_givens(n), w_givens(n);
+
jeval = true;
if (parameters.nb_of_subdiagonals<0) parameters.nb_of_subdiagonals= n-1;
if (parameters.nb_of_superdiagonals<0) parameters.nb_of_superdiagonals= n-1;
/* calculate the jacobian matrix. */
-
if (ei_fdjac1(functor, x, fvec, fjac, parameters.nb_of_subdiagonals, parameters.nb_of_superdiagonals, parameters.epsfcn) <0)
- return UserAksed;
+ return HybridNonLinearSolverSpace::UserAksed;
nfev += std::min(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n);
wa2 = fjac.colwise().blueNorm();
@@ -547,118 +484,71 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (mode != 2)
- for (j = 0; j < n; ++j) {
- diag[j] = wa2[j];
- if (wa2[j] == 0.)
- diag[j] = 1.;
- }
+ for (j = 0; j < n; ++j)
+ diag[j] = (wa2[j]==0.) ? 1. : wa2[j];
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
- wa3 = diag.cwiseProduct(x);
- xnorm = wa3.stableNorm();
+ xnorm = diag.cwiseProduct(x).stableNorm();
delta = parameters.factor * xnorm;
if (delta == 0.)
delta = parameters.factor;
}
/* compute the qr factorization of the jacobian. */
- ei_qrfac<Scalar>(n, n, fjac.data(), fjac.rows(), false, iwa, wa1.data());
-
- /* form (q transpose)*fvec and store in qtf. */
-
- qtf = fvec;
- for (j = 0; j < n; ++j)
- if (fjac(j,j) != 0.) {
- sum = 0.;
- for (i = j; i < n; ++i)
- sum += fjac(i,j) * qtf[i];
- temp = -sum / fjac(j,j);
- for (i = j; i < n; ++i)
- qtf[i] += fjac(i,j) * temp;
- }
+ wa2 = fjac.colwise().blueNorm();
+ HouseholderQR<JacobianType> qrfac(fjac); // no pivoting:
/* copy the triangular factor of the qr factorization into r. */
-
- sing = false;
- for (j = 0; j < n; ++j) {
- l = j;
- for (i = 0; i < j; ++i) {
- R[l] = fjac(i,j);
- l = l + n - i -1;
- }
- R[l] = wa1[j];
- if (wa1[j] == 0.)
- sing = true;
- }
+ R = qrfac.matrixQR();
/* accumulate the orthogonal factor in fjac. */
- ei_qform<Scalar>(n, n, fjac.data(), fjac.rows(), wa1.data());
+ fjac = qrfac.householderQ();
- /* rescale if necessary. */
+ /* form (q transpose)*fvec and store in qtf. */
+ qtf = fjac.transpose() * fvec;
- /* Computing MAX */
+ /* rescale if necessary. */
if (mode != 2)
diag = diag.cwiseMax(wa2);
- /* beginning of the inner loop. */
-
while (true) {
-
/* determine the direction p. */
-
ei_dogleg<Scalar>(R, diag, qtf, delta, wa1);
/* store the direction p and x + p. calculate the norm of p. */
-
wa1 = -wa1;
wa2 = x + wa1;
- wa3 = diag.cwiseProduct(wa1);
- pnorm = wa3.stableNorm();
+ pnorm = diag.cwiseProduct(wa1).stableNorm();
/* on the first iteration, adjust the initial step bound. */
-
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
-
if ( functor(wa2, wa4) < 0)
- return UserAksed;
+ return HybridNonLinearSolverSpace::UserAksed;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the scaled actual reduction. */
-
actred = -1.;
if (fnorm1 < fnorm) /* Computing 2nd power */
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction. */
-
- l = 0;
- for (i = 0; i < n; ++i) {
- sum = 0.;
- for (j = i; j < n; ++j) {
- sum += R[l] * wa1[j];
- ++l;
- }
- wa3[i] = qtf[i] + sum;
- }
+ wa3 = R.template triangularView<Upper>()*wa1 + qtf;
temp = wa3.stableNorm();
prered = 0.;
if (temp < fnorm) /* Computing 2nd power */
prered = 1. - ei_abs2(temp / fnorm);
- /* compute the ratio of the actual to the predicted */
- /* reduction. */
-
+ /* compute the ratio of the actual to the predicted reduction. */
ratio = 0.;
if (prered > 0.)
ratio = actred / prered;
/* update the step bound. */
-
if (ratio < Scalar(.1)) {
ncsuc = 0;
++ncfail;
@@ -666,7 +556,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
} else {
ncfail = 0;
++ncsuc;
- if (ratio >= Scalar(.5) || ncsuc > 1) /* Computing MAX */
+ if (ratio >= Scalar(.5) || ncsuc > 1)
delta = std::max(delta, pnorm / Scalar(.5));
if (ei_abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
@@ -674,7 +564,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
}
/* test for successful iteration. */
-
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
@@ -686,7 +575,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
}
/* determine the progress of the iteration. */
-
++nslow1;
if (actred >= Scalar(.001))
nslow1 = 0;
@@ -696,62 +584,50 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
nslow2 = 0;
/* test for convergence. */
-
if (delta <= parameters.xtol * xnorm || fnorm == 0.)
- return RelativeErrorTooSmall;
+ return HybridNonLinearSolverSpace::RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
-
if (nfev >= parameters.maxfev)
- return TooManyFunctionEvaluation;
- if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= epsilon<Scalar>() * xnorm)
- return TolTooSmall;
+ return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
+ if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
+ return HybridNonLinearSolverSpace::TolTooSmall;
if (nslow2 == 5)
- return NotMakingProgressJacobian;
+ return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
if (nslow1 == 10)
- return NotMakingProgressIterations;
-
- /* criterion for recalculating jacobian approximation */
- /* by forward differences. */
+ return HybridNonLinearSolverSpace::NotMakingProgressIterations;
+ /* criterion for recalculating jacobian. */
if (ncfail == 2)
break; // leave inner loop and go for the next outer loop iteration
/* calculate the rank one modification to the jacobian */
/* and update qtf if necessary. */
-
- for (j = 0; j < n; ++j) {
- sum = wa4.dot(fjac.col(j));
- wa2[j] = (sum - wa3[j]) / pnorm;
- wa1[j] = diag[j] * (diag[j] * wa1[j] / pnorm);
- if (ratio >= Scalar(1e-4))
- qtf[j] = sum;
- }
+ wa1 = diag.cwiseProduct( diag.cwiseProduct(wa1)/pnorm );
+ wa2 = fjac.transpose() * wa4;
+ if (ratio >= Scalar(1e-4))
+ qtf = wa2;
+ wa2 = (wa2-wa3)/pnorm;
/* compute the qr factorization of the updated jacobian. */
-
- ei_r1updt<Scalar>(n, n, R.data(), R.size(), wa1.data(), wa2.data(), wa3.data(), &sing);
- ei_r1mpyq<Scalar>(n, n, fjac.data(), fjac.rows(), wa2.data(), wa3.data());
- ei_r1mpyq<Scalar>(1, n, qtf.data(), 1, wa2.data(), wa3.data());
-
- /* end of the inner loop. */
+ ei_r1updt<Scalar>(R, wa1, v_givens, w_givens, wa2, wa3, &sing);
+ ei_r1mpyq<Scalar>(n, n, fjac.data(), v_givens, w_givens);
+ ei_r1mpyq<Scalar>(1, n, qtf.data(), v_givens, w_givens);
jeval = false;
}
- /* end of the outer loop. */
-
- return Running;
+ return HybridNonLinearSolverSpace::Running;
}
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
FVectorType &x,
const int mode
)
{
- Status status = solveNumericalDiffInit(x, mode);
- while (status==Running)
+ HybridNonLinearSolverSpace::Status status = solveNumericalDiffInit(x, mode);
+ while (status==HybridNonLinearSolverSpace::Running)
status = solveNumericalDiffOneStep(x, mode);
return status;
}
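
As a side note on the pattern used above: the hybrid solver now lets HouseholderQR do the work of the removed ei_qrfac/ei_qform pair, and reads qtf as Q^T*fvec once fjac holds Q. A minimal standalone sketch of that pattern (the sizes, the random data and the final triangular solve are illustrative only, not part of this patch):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        const int n = 4;
        Eigen::MatrixXd fjac = Eigen::MatrixXd::Random(n, n);  // square Jacobian, as in the hybrid solver
        Eigen::VectorXd fvec = Eigen::VectorXd::Random(n);

        // one factorization replaces ei_qrfac (compute R) + ei_qform (accumulate Q):
        Eigen::HouseholderQR<Eigen::MatrixXd> qrfac(fjac);
        Eigen::MatrixXd R = qrfac.matrixQR();      // R in the upper triangle, Householder data below
        Eigen::MatrixXd Q = qrfac.householderQ();  // dense orthogonal factor

        // form (q transpose)*fvec, as the patch does with qtf = fjac.transpose() * fvec:
        Eigen::VectorXd qtf = Q.transpose() * fvec;

        // the dogleg step later only ever reads the upper triangle of R, e.g.:
        Eigen::VectorXd gn = R.triangularView<Eigen::Upper>().solve(qtf);  // Gauss-Newton direction
        std::cout << gn.transpose() << "\n";
        return 0;
    }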
diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
index 8df48d2ab..f21331e3e 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
@@ -28,21 +28,8 @@
#ifndef EIGEN_LEVENBERGMARQUARDT__H
#define EIGEN_LEVENBERGMARQUARDT__H
-/**
- * \ingroup NonLinearOptimization_Module
- * \brief Performs non linear optimization over a non-linear function,
- * using a variant of the Levenberg Marquardt algorithm.
- *
- * Check wikipedia for more information.
- * http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm
- */
-template<typename FunctorType, typename Scalar=double>
-class LevenbergMarquardt
-{
-public:
- LevenbergMarquardt(FunctorType &_functor)
- : functor(_functor) { nfev = njev = iter = 0; fnorm=gnorm = 0.; }
+namespace LevenbergMarquardtSpace {
enum Status {
NotStarted = -2,
Running = -1,
@@ -57,13 +44,31 @@ public:
GtolTooSmall = 8,
UserAsked = 9
};
+}
+
+
+
+/**
+ * \ingroup NonLinearOptimization_Module
+ * \brief Performs non linear optimization over a non-linear function,
+ * using a variant of the Levenberg Marquardt algorithm.
+ *
+ * Check wikipedia for more information.
+ * http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm
+ */
+template<typename FunctorType, typename Scalar=double>
+class LevenbergMarquardt
+{
+public:
+ LevenbergMarquardt(FunctorType &_functor)
+ : functor(_functor) { nfev = njev = iter = 0; fnorm=gnorm = 0.; }
struct Parameters {
Parameters()
: factor(Scalar(100.))
, maxfev(400)
- , ftol(ei_sqrt(epsilon<Scalar>()))
- , xtol(ei_sqrt(epsilon<Scalar>()))
+ , ftol(ei_sqrt(NumTraits<Scalar>::epsilon()))
+ , xtol(ei_sqrt(NumTraits<Scalar>::epsilon()))
, gtol(Scalar(0.))
, epsfcn(Scalar(0.)) {}
Scalar factor;
@@ -77,45 +82,45 @@ public:
typedef Matrix< Scalar, Dynamic, 1 > FVectorType;
typedef Matrix< Scalar, Dynamic, Dynamic > JacobianType;
- Status lmder1(
+ LevenbergMarquardtSpace::Status lmder1(
FVectorType &x,
- const Scalar tol = ei_sqrt(epsilon<Scalar>())
+ const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);
- Status minimize(
+ LevenbergMarquardtSpace::Status minimize(
FVectorType &x,
const int mode=1
);
- Status minimizeInit(
+ LevenbergMarquardtSpace::Status minimizeInit(
FVectorType &x,
const int mode=1
);
- Status minimizeOneStep(
+ LevenbergMarquardtSpace::Status minimizeOneStep(
FVectorType &x,
const int mode=1
);
- static Status lmdif1(
+ static LevenbergMarquardtSpace::Status lmdif1(
FunctorType &functor,
FVectorType &x,
int *nfev,
- const Scalar tol = ei_sqrt(epsilon<Scalar>())
+ const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);
- Status lmstr1(
+ LevenbergMarquardtSpace::Status lmstr1(
FVectorType &x,
- const Scalar tol = ei_sqrt(epsilon<Scalar>())
+ const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);
- Status minimizeOptimumStorage(
+ LevenbergMarquardtSpace::Status minimizeOptimumStorage(
FVectorType &x,
const int mode=1
);
- Status minimizeOptimumStorageInit(
+ LevenbergMarquardtSpace::Status minimizeOptimumStorageInit(
FVectorType &x,
const int mode=1
);
- Status minimizeOptimumStorageOneStep(
+ LevenbergMarquardtSpace::Status minimizeOptimumStorageOneStep(
FVectorType &x,
const int mode=1
);
@@ -125,7 +130,7 @@ public:
Parameters parameters;
FVectorType fvec, qtf, diag;
JacobianType fjac;
- VectorXi ipvt;
+ PermutationMatrix<Dynamic,Dynamic> permutation;
int nfev;
int njev;
int iter;
@@ -146,7 +151,7 @@ private:
};
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::lmder1(
FVectorType &x,
const Scalar tol
@@ -157,7 +162,7 @@ LevenbergMarquardt<FunctorType,Scalar>::lmder1(
/* check the input parameters for errors. */
if (n <= 0 || m < n || tol < 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
resetParameters();
parameters.ftol = tol;
@@ -169,21 +174,21 @@ LevenbergMarquardt<FunctorType,Scalar>::lmder1(
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimize(
FVectorType &x,
const int mode
)
{
- Status status = minimizeInit(x, mode);
+ LevenbergMarquardtSpace::Status status = minimizeInit(x, mode);
do {
status = minimizeOneStep(x, mode);
- } while (status==Running);
+ } while (status==LevenbergMarquardtSpace::Running);
return status;
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(
FVectorType &x,
const int mode
@@ -195,7 +200,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(
wa1.resize(n); wa2.resize(n); wa3.resize(n);
wa4.resize(m);
fvec.resize(m);
- ipvt.resize(n);
fjac.resize(m, n);
if (mode != 2)
diag.resize(n);
@@ -207,72 +211,62 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(
njev = 0;
/* check the input parameters for errors. */
-
if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
if (mode == 2)
for (int j = 0; j < n; ++j)
if (diag[j] <= 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
/* evaluate the function at the starting point */
/* and calculate its norm. */
-
nfev = 1;
if ( functor(x, fvec) < 0)
- return UserAsked;
+ return LevenbergMarquardtSpace::UserAsked;
fnorm = fvec.stableNorm();
/* initialize levenberg-marquardt parameter and iteration counter. */
-
par = 0.;
iter = 1;
- return NotStarted;
+ return LevenbergMarquardtSpace::NotStarted;
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
FVectorType &x,
const int mode
)
{
- int i, j, l;
+ int j;
/* calculate the jacobian matrix. */
-
int df_ret = functor.df(x, fjac);
if (df_ret<0)
- return UserAsked;
+ return LevenbergMarquardtSpace::UserAsked;
if (df_ret>0)
// numerical diff, we evaluated the function df_ret times
nfev += df_ret;
else njev++;
/* compute the qr factorization of the jacobian. */
-
wa2 = fjac.colwise().blueNorm();
- ei_qrfac<Scalar>(m, n, fjac.data(), fjac.rows(), true, ipvt.data(), wa1.data());
- ipvt.array() -= 1; // qrfac() creates ipvt with fortran convention (1->n), convert it to c (0->n-1)
+ ColPivHouseholderQR<JacobianType> qrfac(fjac);
+ fjac = qrfac.matrixQR();
+ permutation = qrfac.colsPermutation();
/* on the first iteration and if mode is 1, scale according */
/* to the norms of the columns of the initial jacobian. */
-
if (iter == 1) {
if (mode != 2)
- for (j = 0; j < n; ++j) {
- diag[j] = wa2[j];
- if (wa2[j] == 0.)
- diag[j] = 1.;
- }
+ for (j = 0; j < n; ++j)
+ diag[j] = (wa2[j]==0.)? 1. : wa2[j];
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
-
- wa3 = diag.cwiseProduct(x);
- xnorm = wa3.stableNorm();
+ xnorm = diag.cwiseProduct(x).stableNorm();
delta = parameters.factor * xnorm;
if (delta == 0.)
delta = parameters.factor;
@@ -280,103 +274,65 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
/* form (q transpose)*fvec and store the first n components in */
/* qtf. */
-
wa4 = fvec;
- for (j = 0; j < n; ++j) {
- if (fjac(j,j) != 0.) {
- sum = 0.;
- for (i = j; i < m; ++i)
- sum += fjac(i,j) * wa4[i];
- temp = -sum / fjac(j,j);
- for (i = j; i < m; ++i)
- wa4[i] += fjac(i,j) * temp;
- }
- fjac(j,j) = wa1[j];
- qtf[j] = wa4[j];
- }
+ wa4.applyOnTheLeft(qrfac.householderQ().adjoint());
+ qtf = wa4.head(n);
/* compute the norm of the scaled gradient. */
-
gnorm = 0.;
if (fnorm != 0.)
- for (j = 0; j < n; ++j) {
- l = ipvt[j];
- if (wa2[l] != 0.) {
- sum = 0.;
- for (i = 0; i <= j; ++i)
- sum += fjac(i,j) * (qtf[i] / fnorm);
- /* Computing MAX */
- gnorm = std::max(gnorm, ei_abs(sum / wa2[l]));
- }
- }
+ for (j = 0; j < n; ++j)
+ if (wa2[permutation.indices()[j]] != 0.)
+ gnorm = std::max(gnorm, ei_abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
/* test for convergence of the gradient norm. */
-
if (gnorm <= parameters.gtol)
- return CosinusTooSmall;
+ return LevenbergMarquardtSpace::CosinusTooSmall;
/* rescale if necessary. */
-
- if (mode != 2) /* Computing MAX */
+ if (mode != 2)
diag = diag.cwiseMax(wa2);
- /* beginning of the inner loop. */
do {
/* determine the levenberg-marquardt parameter. */
-
- ei_lmpar<Scalar>(fjac, ipvt, diag, qtf, delta, par, wa1);
+ ei_lmpar2<Scalar>(qrfac, diag, qtf, delta, par, wa1);
/* store the direction p and x + p. calculate the norm of p. */
-
wa1 = -wa1;
wa2 = x + wa1;
- wa3 = diag.cwiseProduct(wa1);
- pnorm = wa3.stableNorm();
+ pnorm = diag.cwiseProduct(wa1).stableNorm();
/* on the first iteration, adjust the initial step bound. */
-
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
-
if ( functor(wa2, wa4) < 0)
- return UserAsked;
+ return LevenbergMarquardtSpace::UserAsked;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the scaled actual reduction. */
-
actred = -1.;
- if (Scalar(.1) * fnorm1 < fnorm) /* Computing 2nd power */
+ if (Scalar(.1) * fnorm1 < fnorm)
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction and */
/* the scaled directional derivative. */
-
- wa3.fill(0.);
- for (j = 0; j < n; ++j) {
- l = ipvt[j];
- temp = wa1[l];
- for (i = 0; i <= j; ++i)
- wa3[i] += fjac(i,j) * temp;
- }
+ wa3 = fjac.template triangularView<Upper>() * (qrfac.colsPermutation().inverse() *wa1);
temp1 = ei_abs2(wa3.stableNorm() / fnorm);
temp2 = ei_abs2(ei_sqrt(par) * pnorm / fnorm);
- /* Computing 2nd power */
prered = temp1 + temp2 / Scalar(.5);
dirder = -(temp1 + temp2);
/* compute the ratio of the actual to the predicted */
/* reduction. */
-
ratio = 0.;
if (prered != 0.)
ratio = actred / prered;
/* update the step bound. */
-
if (ratio <= Scalar(.25)) {
if (actred >= 0.)
temp = Scalar(.5);
@@ -393,7 +349,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
}
/* test for successful iteration. */
-
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
@@ -405,32 +360,30 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
}
/* tests for convergence. */
-
if (ei_abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
- return RelativeErrorAndReductionTooSmall;
+ return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall;
if (ei_abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
- return RelativeReductionTooSmall;
+ return LevenbergMarquardtSpace::RelativeReductionTooSmall;
if (delta <= parameters.xtol * xnorm)
- return RelativeErrorTooSmall;
+ return LevenbergMarquardtSpace::RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
-
if (nfev >= parameters.maxfev)
- return TooManyFunctionEvaluation;
- if (ei_abs(actred) <= epsilon<Scalar>() && prered <= epsilon<Scalar>() && Scalar(.5) * ratio <= 1.)
- return FtolTooSmall;
- if (delta <= epsilon<Scalar>() * xnorm)
- return XtolTooSmall;
- if (gnorm <= epsilon<Scalar>())
- return GtolTooSmall;
- /* end of the inner loop. repeat if iteration unsuccessful. */
+ return LevenbergMarquardtSpace::TooManyFunctionEvaluation;
+ if (ei_abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.)
+ return LevenbergMarquardtSpace::FtolTooSmall;
+ if (delta <= NumTraits<Scalar>::epsilon() * xnorm)
+ return LevenbergMarquardtSpace::XtolTooSmall;
+ if (gnorm <= NumTraits<Scalar>::epsilon())
+ return LevenbergMarquardtSpace::GtolTooSmall;
+
} while (ratio < Scalar(1e-4));
- /* end of the outer loop. */
- return Running;
+
+ return LevenbergMarquardtSpace::Running;
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::lmstr1(
FVectorType &x,
const Scalar tol
@@ -441,7 +394,7 @@ LevenbergMarquardt<FunctorType,Scalar>::lmstr1(
/* check the input parameters for errors. */
if (n <= 0 || m < n || tol < 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
resetParameters();
parameters.ftol = tol;
@@ -452,7 +405,7 @@ LevenbergMarquardt<FunctorType,Scalar>::lmstr1(
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(
FVectorType &x,
const int mode
@@ -464,8 +417,12 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(
wa1.resize(n); wa2.resize(n); wa3.resize(n);
wa4.resize(m);
fvec.resize(m);
- ipvt.resize(n);
- fjac.resize(m, n);
+ // Only R is stored in fjac. Q is only used to compute 'qtf', which is
+ // Q.transpose()*rhs. qtf will be updated using givens rotations,
+ // instead of storing them in Q.
+ // The purpose is to use only an nxn matrix, instead of an mxn one, so
+ // that we can handle cases where m>>n :
+ fjac.resize(n, n);
if (mode != 2)
diag.resize(n);
assert( (mode!=2 || diag.size()==n) && "When using mode==2, the caller must provide a valid 'diag'");
@@ -476,74 +433,74 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(
njev = 0;
/* check the input parameters for errors. */
-
if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
if (mode == 2)
for (int j = 0; j < n; ++j)
if (diag[j] <= 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
/* evaluate the function at the starting point */
/* and calculate its norm. */
-
nfev = 1;
if ( functor(x, fvec) < 0)
- return UserAsked;
+ return LevenbergMarquardtSpace::UserAsked;
fnorm = fvec.stableNorm();
/* initialize levenberg-marquardt parameter and iteration counter. */
-
par = 0.;
iter = 1;
- return NotStarted;
+ return LevenbergMarquardtSpace::NotStarted;
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
FVectorType &x,
const int mode
)
{
- int i, j, l;
+ int i, j;
bool sing;
/* compute the qr factorization of the jacobian matrix */
/* calculated one row at a time, while simultaneously */
/* forming (q transpose)*fvec and storing the first */
/* n components in qtf. */
-
qtf.fill(0.);
fjac.fill(0.);
int rownb = 2;
for (i = 0; i < m; ++i) {
- if (functor.df(x, wa3, rownb) < 0) return UserAsked;
- temp = fvec[i];
- ei_rwupdt<Scalar>(n, fjac.data(), fjac.rows(), wa3.data(), qtf.data(), &temp, wa1.data(), wa2.data());
+ if (functor.df(x, wa3, rownb) < 0) return LevenbergMarquardtSpace::UserAsked;
+ ei_rwupdt<Scalar>(fjac, wa3, qtf, fvec[i]);
++rownb;
}
++njev;
/* if the jacobian is rank deficient, call qrfac to */
/* reorder its columns and update the components of qtf. */
-
sing = false;
for (j = 0; j < n; ++j) {
- if (fjac(j,j) == 0.) {
+ if (fjac(j,j) == 0.)
sing = true;
- }
- ipvt[j] = j;
wa2[j] = fjac.col(j).head(j).stableNorm();
}
+ permutation.setIdentity(n);
if (sing) {
- ipvt.array() += 1;
wa2 = fjac.colwise().blueNorm();
- ei_qrfac<Scalar>(n, n, fjac.data(), fjac.rows(), true, ipvt.data(), wa1.data());
- ipvt.array() -= 1; // qrfac() creates ipvt with fortran convention (1->n), convert it to c (0->n-1)
+ // TODO We have no unit test covering this code path, do not modify
+ // until it is carefully tested
+ ColPivHouseholderQR<JacobianType> qrfac(fjac);
+ fjac = qrfac.matrixQR();
+ wa1 = fjac.diagonal();
+ fjac.diagonal() = qrfac.hCoeffs();
+ permutation = qrfac.colsPermutation();
+ // TODO : avoid this:
+ for(int ii=0; ii< fjac.cols(); ii++) fjac.col(ii).segment(ii+1, fjac.rows()-ii-1) *= fjac(ii,ii); // rescale vectors
+
for (j = 0; j < n; ++j) {
if (fjac(j,j) != 0.) {
sum = 0.;
@@ -559,107 +516,74 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
/* on the first iteration and if mode is 1, scale according */
/* to the norms of the columns of the initial jacobian. */
-
if (iter == 1) {
if (mode != 2)
- for (j = 0; j < n; ++j) {
- diag[j] = wa2[j];
- if (wa2[j] == 0.)
- diag[j] = 1.;
- }
+ for (j = 0; j < n; ++j)
+ diag[j] = (wa2[j]==0.)? 1. : wa2[j];
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
-
- wa3 = diag.cwiseProduct(x);
- xnorm = wa3.stableNorm();
+ xnorm = diag.cwiseProduct(x).stableNorm();
delta = parameters.factor * xnorm;
if (delta == 0.)
delta = parameters.factor;
}
/* compute the norm of the scaled gradient. */
-
gnorm = 0.;
if (fnorm != 0.)
- for (j = 0; j < n; ++j) {
- l = ipvt[j];
- if (wa2[l] != 0.) {
- sum = 0.;
- for (i = 0; i <= j; ++i)
- sum += fjac(i,j) * (qtf[i] / fnorm);
- /* Computing MAX */
- gnorm = std::max(gnorm, ei_abs(sum / wa2[l]));
- }
- }
+ for (j = 0; j < n; ++j)
+ if (wa2[permutation.indices()[j]] != 0.)
+ gnorm = std::max(gnorm, ei_abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
/* test for convergence of the gradient norm. */
-
if (gnorm <= parameters.gtol)
- return CosinusTooSmall;
+ return LevenbergMarquardtSpace::CosinusTooSmall;
/* rescale if necessary. */
-
- if (mode != 2) /* Computing MAX */
+ if (mode != 2)
diag = diag.cwiseMax(wa2);
- /* beginning of the inner loop. */
do {
/* determine the levenberg-marquardt parameter. */
-
- ei_lmpar<Scalar>(fjac, ipvt, diag, qtf, delta, par, wa1);
+ ei_lmpar<Scalar>(fjac, permutation.indices(), diag, qtf, delta, par, wa1);
/* store the direction p and x + p. calculate the norm of p. */
-
wa1 = -wa1;
wa2 = x + wa1;
- wa3 = diag.cwiseProduct(wa1);
- pnorm = wa3.stableNorm();
+ pnorm = diag.cwiseProduct(wa1).stableNorm();
/* on the first iteration, adjust the initial step bound. */
-
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
-
if ( functor(wa2, wa4) < 0)
- return UserAsked;
+ return LevenbergMarquardtSpace::UserAsked;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the scaled actual reduction. */
-
actred = -1.;
- if (Scalar(.1) * fnorm1 < fnorm) /* Computing 2nd power */
+ if (Scalar(.1) * fnorm1 < fnorm)
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction and */
/* the scaled directional derivative. */
-
- wa3.fill(0.);
- for (j = 0; j < n; ++j) {
- l = ipvt[j];
- temp = wa1[l];
- for (i = 0; i <= j; ++i)
- wa3[i] += fjac(i,j) * temp;
- }
+ wa3 = fjac.corner(TopLeft,n,n).template triangularView<Upper>() * (permutation.inverse() * wa1);
temp1 = ei_abs2(wa3.stableNorm() / fnorm);
temp2 = ei_abs2(ei_sqrt(par) * pnorm / fnorm);
- /* Computing 2nd power */
prered = temp1 + temp2 / Scalar(.5);
dirder = -(temp1 + temp2);
/* compute the ratio of the actual to the predicted */
/* reduction. */
-
ratio = 0.;
if (prered != 0.)
ratio = actred / prered;
/* update the step bound. */
-
if (ratio <= Scalar(.25)) {
if (actred >= 0.)
temp = Scalar(.5);
@@ -676,7 +600,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
}
/* test for successful iteration. */
-
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
@@ -688,46 +611,44 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
}
/* tests for convergence. */
-
if (ei_abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
- return RelativeErrorAndReductionTooSmall;
+ return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall;
if (ei_abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
- return RelativeReductionTooSmall;
+ return LevenbergMarquardtSpace::RelativeReductionTooSmall;
if (delta <= parameters.xtol * xnorm)
- return RelativeErrorTooSmall;
+ return LevenbergMarquardtSpace::RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
-
if (nfev >= parameters.maxfev)
- return TooManyFunctionEvaluation;
- if (ei_abs(actred) <= epsilon<Scalar>() && prered <= epsilon<Scalar>() && Scalar(.5) * ratio <= 1.)
- return FtolTooSmall;
- if (delta <= epsilon<Scalar>() * xnorm)
- return XtolTooSmall;
- if (gnorm <= epsilon<Scalar>())
- return GtolTooSmall;
- /* end of the inner loop. repeat if iteration unsuccessful. */
+ return LevenbergMarquardtSpace::TooManyFunctionEvaluation;
+ if (ei_abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.)
+ return LevenbergMarquardtSpace::FtolTooSmall;
+ if (delta <= NumTraits<Scalar>::epsilon() * xnorm)
+ return LevenbergMarquardtSpace::XtolTooSmall;
+ if (gnorm <= NumTraits<Scalar>::epsilon())
+ return LevenbergMarquardtSpace::GtolTooSmall;
+
} while (ratio < Scalar(1e-4));
- /* end of the outer loop. */
- return Running;
+
+ return LevenbergMarquardtSpace::Running;
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
FVectorType &x,
const int mode
)
{
- Status status = minimizeOptimumStorageInit(x, mode);
+ LevenbergMarquardtSpace::Status status = minimizeOptimumStorageInit(x, mode);
do {
status = minimizeOptimumStorageOneStep(x, mode);
- } while (status==Running);
+ } while (status==LevenbergMarquardtSpace::Running);
return status;
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::lmdif1(
FunctorType &functor,
FVectorType &x,
@@ -740,7 +661,7 @@ LevenbergMarquardt<FunctorType,Scalar>::lmdif1(
/* check the input parameters for errors. */
if (n <= 0 || m < n || tol < 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
NumericalDiff<FunctorType> numDiff(functor);
// embedded LevenbergMarquardt
@@ -749,7 +670,7 @@ LevenbergMarquardt<FunctorType,Scalar>::lmdif1(
lm.parameters.xtol = tol;
lm.parameters.maxfev = 200*(n+1);
- Status info = Status(lm.minimize(x));
+ LevenbergMarquardtSpace::Status info = LevenbergMarquardtSpace::Status(lm.minimize(x));
if (nfev)
* nfev = lm.nfev;
return info;
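
The LevenbergMarquardt changes above follow the same idea with column pivoting: ColPivHouseholderQR replaces the hand-written ei_qrfac call, and the pivot order lives in a PermutationMatrix instead of the Fortran-convention ipvt vector. A sketch of that factorization step in isolation (sizes and variable names are illustrative; the applyOnTheLeft line mirrors the one in minimizeOneStep):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        const int m = 6, n = 3;
        Eigen::MatrixXd fjac = Eigen::MatrixXd::Random(m, n);   // rectangular Jacobian
        Eigen::VectorXd fvec = Eigen::VectorXd::Random(m);

        Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qrfac(fjac);
        Eigen::MatrixXd qr = qrfac.matrixQR();                   // R in the upper triangle
        Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic> permutation = qrfac.colsPermutation();

        // form (q transpose)*fvec and keep the first n components, as in the patch:
        Eigen::VectorXd wa4 = fvec;
        wa4.applyOnTheLeft(qrfac.householderQ().adjoint());
        Eigen::VectorXd qtf = wa4.head(n);

        // the pivot order used by the gnorm loop above (wa2[permutation.indices()[j]]):
        std::cout << "pivot order: " << permutation.indices().transpose() << "\n";
        std::cout << "qtf:         " << qtf.transpose() << "\n";
        return 0;
    }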
diff --git a/unsupported/Eigen/src/NonLinearOptimization/chkder.h b/unsupported/Eigen/src/NonLinearOptimization/chkder.h
index d5fca9c62..591e8bef7 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/chkder.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/chkder.h
@@ -4,27 +4,26 @@
template<typename Scalar>
void ei_chkder(
- Matrix< Scalar, Dynamic, 1 > &x,
- Matrix< Scalar, Dynamic, 1 > &fvec,
- Matrix< Scalar, Dynamic, Dynamic > &fjac,
+ const Matrix< Scalar, Dynamic, 1 > &x,
+ const Matrix< Scalar, Dynamic, 1 > &fvec,
+ const Matrix< Scalar, Dynamic, Dynamic > &fjac,
Matrix< Scalar, Dynamic, 1 > &xp,
- Matrix< Scalar, Dynamic, 1 > &fvecp,
+ const Matrix< Scalar, Dynamic, 1 > &fvecp,
int mode,
Matrix< Scalar, Dynamic, 1 > &err
)
{
- const Scalar eps = ei_sqrt(epsilon<Scalar>());
- const Scalar epsf = chkder_factor * epsilon<Scalar>();
+ const Scalar eps = ei_sqrt(NumTraits<Scalar>::epsilon());
+ const Scalar epsf = chkder_factor * NumTraits<Scalar>::epsilon();
const Scalar epslog = chkder_log10e * ei_log(eps);
Scalar temp;
- int i,j;
const int m = fvec.size(), n = x.size();
if (mode != 2) {
+ /* mode = 1. */
xp.resize(n);
- /* mode = 1. */
- for (j = 0; j < n; ++j) {
+ for (int j = 0; j < n; ++j) {
temp = eps * ei_abs(x[j]);
if (temp == 0.)
temp = eps;
@@ -32,24 +31,24 @@ void ei_chkder(
}
}
else {
- /* mode = 2. */
+ /* mode = 2. */
err.setZero(m);
- for (j = 0; j < n; ++j) {
+ for (int j = 0; j < n; ++j) {
temp = ei_abs(x[j]);
if (temp == 0.)
temp = 1.;
err += temp * fjac.col(j);
}
- for (i = 0; i < m; ++i) {
+ for (int i = 0; i < m; ++i) {
temp = 1.;
if (fvec[i] != 0. && fvecp[i] != 0. && ei_abs(fvecp[i] - fvec[i]) >= epsf * ei_abs(fvec[i]))
temp = eps * ei_abs((fvecp[i] - fvec[i]) / eps - err[i]) / (ei_abs(fvec[i]) + ei_abs(fvecp[i]));
err[i] = 1.;
- if (temp > epsilon<Scalar>() && temp < eps)
+ if (temp > NumTraits<Scalar>::epsilon() && temp < eps)
err[i] = (chkder_log10e * ei_log(temp) - epslog) / epslog;
if (temp >= eps)
err[i] = 0.;
}
}
-} /* chkder_ */
+}
diff --git a/unsupported/Eigen/src/NonLinearOptimization/covar.h b/unsupported/Eigen/src/NonLinearOptimization/covar.h
index 2e495cd7f..2f983a958 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/covar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/covar.h
@@ -1,9 +1,9 @@
- template <typename Scalar>
+template <typename Scalar>
void ei_covar(
Matrix< Scalar, Dynamic, Dynamic > &r,
const VectorXi &ipvt,
- Scalar tol = ei_sqrt(epsilon<Scalar>()) )
+ Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon()) )
{
/* Local variables */
int i, j, k, l, ii, jj;
@@ -16,8 +16,7 @@ void ei_covar(
Matrix< Scalar, Dynamic, 1 > wa(n);
assert(ipvt.size()==n);
- /* form the inverse of r in the full upper triangle of r. */
-
+ /* form the inverse of r in the full upper triangle of r. */
l = -1;
for (k = 0; k < n; ++k)
if (ei_abs(r(k,k)) > tolr) {
@@ -25,29 +24,21 @@ void ei_covar(
for (j = 0; j <= k-1; ++j) {
temp = r(k,k) * r(j,k);
r(j,k) = 0.;
- for (i = 0; i <= j; ++i)
- r(i,k) -= temp * r(i,j);
+ r.col(k).head(j+1) -= r.col(j).head(j+1) * temp;
}
l = k;
}
- /* form the full upper triangle of the inverse of (r transpose)*r */
- /* in the full upper triangle of r. */
-
+ /* form the full upper triangle of the inverse of (r transpose)*r */
+ /* in the full upper triangle of r. */
for (k = 0; k <= l; ++k) {
- for (j = 0; j <= k-1; ++j) {
- temp = r(j,k);
- for (i = 0; i <= j; ++i)
- r(i,j) += temp * r(i,k);
- }
- temp = r(k,k);
- for (i = 0; i <= k; ++i)
- r(i,k) = temp * r(i,k);
+ for (j = 0; j <= k-1; ++j)
+ r.col(j).head(j+1) += r.col(k).head(j+1) * r(j,k);
+ r.col(k).head(k+1) *= r(k,k);
}
- /* form the full lower triangle of the covariance matrix */
- /* in the strict lower triangle of r and in wa. */
-
+ /* form the full lower triangle of the covariance matrix */
+ /* in the strict lower triangle of r and in wa. */
for (j = 0; j < n; ++j) {
jj = ipvt[j];
sing = j > l;
@@ -63,12 +54,8 @@ void ei_covar(
wa[jj] = r(j,j);
}
- /* symmetrize the covariance matrix in r. */
-
- for (j = 0; j < n; ++j) {
- for (i = 0; i <= j; ++i)
- r(i,j) = r(j,i);
- r(j,j) = wa[j];
- }
+ /* symmetrize the covariance matrix in r. */
+ r.corner(TopLeft,n,n).template triangularView<StrictlyUpper>() = r.corner(TopLeft,n,n).transpose();
+ r.diagonal() = wa;
}
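
The final hunk above replaces two nested loops with a triangularView assignment plus a diagonal write; in isolation the idiom looks like this (a toy 3x3 example with made-up values):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        Eigen::MatrixXd r(3, 3);
        r << 1, 9, 9,
             2, 3, 9,
             4, 5, 6;               // lower triangle holds the result, upper part is stale
        Eigen::VectorXd wa(3);
        wa << 10, 20, 30;           // saved diagonal, as in ei_covar

        // copy the strict lower triangle into the strict upper triangle...
        r.triangularView<Eigen::StrictlyUpper>() = r.transpose();
        // ...then restore the diagonal, exactly as the patch does:
        r.diagonal() = wa;

        std::cout << r << "\n";     // symmetric off the diagonal, wa on the diagonal
        return 0;
    }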
diff --git a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
index dd6b39cb4..9c1d38dea 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
@@ -1,76 +1,58 @@
template <typename Scalar>
void ei_dogleg(
- Matrix< Scalar, Dynamic, 1 > &r,
+ const Matrix< Scalar, Dynamic, Dynamic > &qrfac,
const Matrix< Scalar, Dynamic, 1 > &diag,
const Matrix< Scalar, Dynamic, 1 > &qtb,
Scalar delta,
Matrix< Scalar, Dynamic, 1 > &x)
{
/* Local variables */
- int i, j, k, l, jj;
+ int i, j;
Scalar sum, temp, alpha, bnorm;
Scalar gnorm, qnorm;
Scalar sgnorm;
/* Function Body */
- const Scalar epsmch = epsilon<Scalar>();
- const int n = diag.size();
- Matrix< Scalar, Dynamic, 1 > wa1(n), wa2(n);
+ const Scalar epsmch = NumTraits<Scalar>::epsilon();
+ const int n = qrfac.cols();
assert(n==qtb.size());
assert(n==x.size());
+ assert(n==diag.size());
+ Matrix< Scalar, Dynamic, 1 > wa1(n), wa2(n);
/* first, calculate the gauss-newton direction. */
-
- jj = n * (n + 1) / 2;
- for (k = 0; k < n; ++k) {
- j = n - k - 1;
- jj -= k+1;
- l = jj + 1;
- sum = 0.;
- for (i = j+1; i < n; ++i) {
- sum += r[l] * x[i];
- ++l;
- }
- temp = r[jj];
+ for (j = n-1; j >=0; --j) {
+ temp = qrfac(j,j);
if (temp == 0.) {
- l = j;
- for (i = 0; i <= j; ++i) {
- /* Computing MAX */
- temp = std::max(temp,ei_abs(r[l]));
- l = l + n - i;
- }
- temp = epsmch * temp;
+ temp = epsmch * qrfac.col(j).head(j+1).maxCoeff();
if (temp == 0.)
temp = epsmch;
}
- x[j] = (qtb[j] - sum) / temp;
+ if (j==n-1)
+ x[j] = qtb[j] / temp;
+ else
+ x[j] = (qtb[j] - qrfac.row(j).tail(n-j-1).dot(x.tail(n-j-1))) / temp;
}
/* test whether the gauss-newton direction is acceptable. */
-
- wa1.fill(0.);
- wa2 = diag.cwiseProduct(x);
- qnorm = wa2.stableNorm();
+ qnorm = diag.cwiseProduct(x).stableNorm();
if (qnorm <= delta)
return;
+ // TODO : this path is not tested by Eigen unit tests
+
/* the gauss-newton direction is not acceptable. */
/* next, calculate the scaled gradient direction. */
- l = 0;
+ wa1.fill(0.);
for (j = 0; j < n; ++j) {
- temp = qtb[j];
- for (i = j; i < n; ++i) {
- wa1[i] += r[l] * temp;
- ++l;
- }
+ wa1.tail(n-j) += qrfac.row(j).tail(n-j) * qtb[j];
wa1[j] /= diag[j];
}
/* calculate the norm of the scaled gradient and test for */
/* the special case in which the scaled gradient is zero. */
-
gnorm = wa1.stableNorm();
sgnorm = 0.;
alpha = delta / qnorm;
@@ -79,24 +61,20 @@ void ei_dogleg(
/* calculate the point along the scaled gradient */
/* at which the quadratic is minimized. */
-
wa1.array() /= (diag*gnorm).array();
- l = 0;
+ // TODO : once unit tests cover this part, use:
+ // wa2 = qrfac.template triangularView<Upper>() * wa1;
for (j = 0; j < n; ++j) {
sum = 0.;
for (i = j; i < n; ++i) {
- sum += r[l] * wa1[i];
- ++l;
- /* L100: */
+ sum += qrfac(j,i) * wa1[i];
}
wa2[j] = sum;
- /* L110: */
}
temp = wa2.stableNorm();
sgnorm = gnorm / temp / temp;
/* test whether the scaled gradient direction is acceptable. */
-
alpha = 0.;
if (sgnorm >= delta)
goto algo_end;
@@ -104,21 +82,15 @@ void ei_dogleg(
/* the scaled gradient direction is not acceptable. */
/* finally, calculate the point along the dogleg */
/* at which the quadratic is minimized. */
-
bnorm = qtb.stableNorm();
temp = bnorm / gnorm * (bnorm / qnorm) * (sgnorm / delta);
- /* Computing 2nd power */
temp = temp - delta / qnorm * ei_abs2(sgnorm / delta) + ei_sqrt(ei_abs2(temp - delta / qnorm) + (1.-ei_abs2(delta / qnorm)) * (1.-ei_abs2(sgnorm / delta)));
- /* Computing 2nd power */
alpha = delta / qnorm * (1. - ei_abs2(sgnorm / delta)) / temp;
algo_end:
/* form appropriate convex combination of the gauss-newton */
/* direction and the scaled gradient direction. */
-
temp = (1.-alpha) * std::min(sgnorm,delta);
x = temp * wa1 + alpha * x;
- return;
-
}
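
The rewritten loop at the top of ei_dogleg is a back substitution on the upper triangle of qrfac, solving R*x = qtb (with a fallback value whenever a diagonal entry is exactly zero). When R is nonsingular it is equivalent to a single triangular solve; a small self-contained sketch, not taken from the patch:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        const int n = 3;
        Eigen::MatrixXd qrfac(n, n);    // upper triangular R, e.g. HouseholderQR::matrixQR()
        qrfac << 2, 1, 1,
                 0, 3, 1,
                 0, 0, 4;
        Eigen::VectorXd qtb(n);
        qtb << 1, 2, 3;

        // Gauss-Newton direction: back substitution on R x = qtb
        Eigen::VectorXd x = qrfac.triangularView<Eigen::Upper>().solve(qtb);
        std::cout << "Gauss-Newton step: " << x.transpose() << "\n";

        // ei_dogleg then checks diag.cwiseProduct(x).stableNorm() against the trust
        // radius delta and, if the step is too long, blends it with the scaled gradient.
        return 0;
    }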
diff --git a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
index 376df7c40..3dc1e8070 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
@@ -10,19 +10,20 @@ int ei_fdjac1(
{
/* Local variables */
Scalar h;
- int i, j, k;
+ int j, k;
Scalar eps, temp;
int msum;
- int iflag = 0;
+ int iflag;
+ int start, length;
/* Function Body */
- const Scalar epsmch = epsilon<Scalar>();
+ const Scalar epsmch = NumTraits<Scalar>::epsilon();
const int n = x.size();
assert(fvec.size()==n);
Matrix< Scalar, Dynamic, 1 > wa1(n);
Matrix< Scalar, Dynamic, 1 > wa2(n);
- eps = ei_sqrt((std::max(epsfcn,epsmch)));
+ eps = ei_sqrt(std::max(epsfcn,epsmch));
msum = ml + mu + 1;
if (msum >= n) {
/* computation of dense approximate jacobian. */
@@ -42,29 +43,26 @@ int ei_fdjac1(
}else {
/* computation of banded approximate jacobian. */
for (k = 0; k < msum; ++k) {
- for (j = k; msum< 0 ? j > n: j < n; j += msum) {
+ for (j = k; (msum<0) ? (j>n): (j<n); j += msum) {
wa2[j] = x[j];
h = eps * ei_abs(wa2[j]);
if (h == 0.) h = eps;
x[j] = wa2[j] + h;
}
iflag = Functor(x, wa1);
- if (iflag < 0) {
+ if (iflag < 0)
return iflag;
- }
- for (j = k; msum< 0 ? j > n: j < n; j += msum) {
+ for (j = k; (msum<0) ? (j>n): (j<n); j += msum) {
x[j] = wa2[j];
h = eps * ei_abs(wa2[j]);
if (h == 0.) h = eps;
- for (i = 0; i < n; ++i) {
- fjac(i,j) = 0.;
- if (i >= j - mu && i <= j + ml) {
- fjac(i,j) = (wa1[i] - fvec[i]) / h;
- }
- }
+ fjac.col(j).setZero();
+ start = std::max(0,j-mu);
+ length = std::min(n-1, j+ml) - start + 1;
+ fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h;
}
}
}
- return iflag;
-} /* fdjac1_ */
+ return 0;
+}
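
ei_fdjac1 approximates the Jacobian by forward differences; the banded branch above perturbs the variables in groups of ml+mu+1 and fills only the rows between j-mu and j+ml of each column. The dense, one-column-at-a-time idea behind it, with a hypothetical two-equation functor (the toy system and its int return convention only mirror the calls visible in the code above):

    #include <Eigen/Dense>
    #include <iostream>
    #include <cmath>

    // toy system: f0 = x0*x0 - x1,  f1 = x0 + 2*x1
    static int toy_functor(const Eigen::VectorXd &x, Eigen::VectorXd &fvec)
    {
        fvec.resize(2);
        fvec[0] = x[0] * x[0] - x[1];
        fvec[1] = x[0] + 2.0 * x[1];
        return 0;                       // a negative value would mean "user asked to stop"
    }

    int main()
    {
        Eigen::VectorXd x(2); x << 1.0, 2.0;
        Eigen::VectorXd fvec, wa1;
        toy_functor(x, fvec);

        const double eps = std::sqrt(Eigen::NumTraits<double>::epsilon());
        Eigen::MatrixXd fjac(2, 2);
        for (int j = 0; j < x.size(); ++j) {
            const double temp = x[j];
            double h = eps * std::abs(temp);
            if (h == 0.) h = eps;
            x[j] = temp + h;
            toy_functor(x, wa1);
            x[j] = temp;
            fjac.col(j) = (wa1 - fvec) / h;   // forward difference for column j
        }
        std::cout << fjac << "\n";            // approximately [[2, -1], [1, 2]]
        return 0;
    }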
diff --git a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
index ab8549f1a..d763bd4e7 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
@@ -2,7 +2,7 @@
template <typename Scalar>
void ei_lmpar(
Matrix< Scalar, Dynamic, Dynamic > &r,
- VectorXi &ipvt, // TODO : const once ipvt mess fixed
+ const VectorXi &ipvt,
const Matrix< Scalar, Dynamic, 1 > &diag,
const Matrix< Scalar, Dynamic, 1 > &qtb,
Scalar delta,
@@ -30,7 +30,6 @@ void ei_lmpar(
/* compute and store in x the gauss-newton direction. if the */
/* jacobian is rank-deficient, obtain a least squares solution. */
-
int nsing = n-1;
wa1 = qtb;
for (j = 0; j < n; ++j) {
@@ -52,7 +51,6 @@ void ei_lmpar(
/* initialize the iteration counter. */
/* evaluate the function at the origin, and test */
/* for acceptance of the gauss-newton direction. */
-
iter = 0;
wa2 = diag.cwiseProduct(x);
dxnorm = wa2.blueNorm();
@@ -65,7 +63,6 @@ void ei_lmpar(
/* if the jacobian is not rank deficient, the newton */
/* step provides a lower bound, parl, for the zero of */
/* the function. otherwise set this bound to zero. */
-
parl = 0.;
if (nsing >= n-1) {
for (j = 0; j < n; ++j) {
@@ -85,7 +82,6 @@ void ei_lmpar(
}
/* calculate an upper bound, paru, for the zero of the function. */
-
for (j = 0; j < n; ++j)
wa1[j] = r.col(j).head(j+1).dot(qtb.head(j+1)) / diag[ipvt[j]];
@@ -96,22 +92,18 @@ void ei_lmpar(
/* if the input par lies outside of the interval (parl,paru), */
/* set par to the closer endpoint. */
-
par = std::max(par,parl);
par = std::min(par,paru);
if (par == 0.)
par = gnorm / dxnorm;
/* beginning of an iteration. */
-
while (true) {
++iter;
/* evaluate the function at the current value of par. */
-
if (par == 0.)
par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
-
wa1 = ei_sqrt(par)* diag;
Matrix< Scalar, Dynamic, 1 > sdiag(n);
@@ -125,12 +117,10 @@ void ei_lmpar(
/* if the function is small enough, accept the current value */
/* of par. also test for the exceptional cases where parl */
/* is zero or the number of iterations has reached 10. */
-
if (ei_abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10)
break;
/* compute the newton correction. */
-
for (j = 0; j < n; ++j) {
l = ipvt[j];
wa1[j] = diag[l] * (wa2[l] / dxnorm);
@@ -145,23 +135,148 @@ void ei_lmpar(
parc = fp / delta / temp / temp;
/* depending on the sign of the function, update parl or paru. */
-
if (fp > 0.)
parl = std::max(parl,par);
if (fp < 0.)
paru = std::min(paru,par);
/* compute an improved estimate for par. */
-
/* Computing MAX */
par = std::max(parl,par+parc);
/* end of an iteration. */
-
}
/* termination. */
+ if (iter == 0)
+ par = 0.;
+ return;
+}
+
+template <typename Scalar>
+void ei_lmpar2(
+ const ColPivHouseholderQR<Matrix< Scalar, Dynamic, Dynamic> > &qr,
+ const Matrix< Scalar, Dynamic, 1 > &diag,
+ const Matrix< Scalar, Dynamic, 1 > &qtb,
+ Scalar delta,
+ Scalar &par,
+ Matrix< Scalar, Dynamic, 1 > &x)
+{
+ /* Local variables */
+ int j;
+ Scalar fp;
+ Scalar parc, parl;
+ int iter;
+ Scalar temp, paru;
+ Scalar gnorm;
+ Scalar dxnorm;
+
+
+ /* Function Body */
+ const Scalar dwarf = std::numeric_limits<Scalar>::min();
+ const int n = qr.matrixQR().cols();
+ assert(n==diag.size());
+ assert(n==qtb.size());
+
+ Matrix< Scalar, Dynamic, 1 > wa1, wa2;
+
+ /* compute and store in x the gauss-newton direction. if the */
+ /* jacobian is rank-deficient, obtain a least squares solution. */
+
+// const int rank = qr.nonzeroPivots(); // exactly double(0.)
+ const int rank = qr.rank(); // use a threshold
+ wa1 = qtb;
+ wa1.tail(n-rank).setZero();
+ qr.matrixQR().corner(TopLeft, rank, rank).template triangularView<Upper>().solveInPlace(wa1.head(rank));
+
+ x = qr.colsPermutation()*wa1;
+
+ /* initialize the iteration counter. */
+ /* evaluate the function at the origin, and test */
+ /* for acceptance of the gauss-newton direction. */
+ iter = 0;
+ wa2 = diag.cwiseProduct(x);
+ dxnorm = wa2.blueNorm();
+ fp = dxnorm - delta;
+ if (fp <= Scalar(0.1) * delta) {
+ par = 0;
+ return;
+ }
+
+ /* if the jacobian is not rank deficient, the newton */
+ /* step provides a lower bound, parl, for the zero of */
+ /* the function. otherwise set this bound to zero. */
+ parl = 0.;
+ if (rank==n) {
+ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2)/dxnorm;
+ qr.matrixQR().corner(TopLeft, n, n).transpose().template triangularView<Lower>().solveInPlace(wa1);
+ temp = wa1.blueNorm();
+ parl = fp / delta / temp / temp;
+ }
+
+ /* calculate an upper bound, paru, for the zero of the function. */
+ for (j = 0; j < n; ++j)
+ wa1[j] = qr.matrixQR().col(j).head(j+1).dot(qtb.head(j+1)) / diag[qr.colsPermutation().indices()(j)];
+
+ gnorm = wa1.stableNorm();
+ paru = gnorm / delta;
+ if (paru == 0.)
+ paru = dwarf / std::min(delta,Scalar(0.1));
+
+ /* if the input par lies outside of the interval (parl,paru), */
+ /* set par to the closer endpoint. */
+ par = std::max(par,parl);
+ par = std::min(par,paru);
+ if (par == 0.)
+ par = gnorm / dxnorm;
+
+ /* beginning of an iteration. */
+ Matrix< Scalar, Dynamic, Dynamic > s = qr.matrixQR();
+ while (true) {
+ ++iter;
+
+ /* evaluate the function at the current value of par. */
+ if (par == 0.)
+ par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
+ wa1 = ei_sqrt(par)* diag;
+
+ Matrix< Scalar, Dynamic, 1 > sdiag(n);
+ ei_qrsolv<Scalar>(s, qr.colsPermutation().indices(), wa1, qtb, x, sdiag);
+
+ wa2 = diag.cwiseProduct(x);
+ dxnorm = wa2.blueNorm();
+ temp = fp;
+ fp = dxnorm - delta;
+
+ /* if the function is small enough, accept the current value */
+ /* of par. also test for the exceptional cases where parl */
+ /* is zero or the number of iterations has reached 10. */
+ if (ei_abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10)
+ break;
+
+ /* compute the newton correction. */
+ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2/dxnorm);
+ // we could almost use this here, but the diagonal is outside qr, in sdiag[]
+ // qr.matrixQR().corner(TopLeft, n, n).transpose().template triangularView<Lower>().solveInPlace(wa1);
+ for (j = 0; j < n; ++j) {
+ wa1[j] /= sdiag[j];
+ temp = wa1[j];
+ for (int i = j+1; i < n; ++i)
+ wa1[i] -= s(i,j) * temp;
+ }
+ temp = wa1.blueNorm();
+ parc = fp / delta / temp / temp;
+
+ /* depending on the sign of the function, update parl or paru. */
+ if (fp > 0.)
+ parl = std::max(parl,par);
+ if (fp < 0.)
+ paru = std::min(paru,par);
+
+ /* compute an improved estimate for par. */
+ par = std::max(parl,par+parc);
+ }
if (iter == 0)
par = 0.;
return;
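
The new ei_lmpar2 above starts from a rank-aware least-squares solve: keep the first rank components of Q^T*b, back-solve on the leading rank-by-rank block of R, zero the rest, and undo the column permutation. A standalone sketch of that opening step (random data; topLeftCorner is the current spelling of the corner(TopLeft,...) call used in the patch):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        const int m = 5, n = 3;
        Eigen::MatrixXd A = Eigen::MatrixXd::Random(m, n);
        A.col(2) = A.col(0) + A.col(1);            // force a rank-deficient Jacobian
        Eigen::VectorXd b = Eigen::VectorXd::Random(m);

        Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);
        const int rank = qr.rank();                // thresholded rank, as in ei_lmpar2

        Eigen::VectorXd qtb = b;
        qtb.applyOnTheLeft(qr.householderQ().adjoint());   // Q^T * b

        Eigen::VectorXd wa1 = qtb.head(n);
        wa1.tail(n - rank).setZero();
        qr.matrixQR().topLeftCorner(rank, rank)
          .triangularView<Eigen::Upper>()
          .solveInPlace(wa1.head(rank));
        Eigen::VectorXd x = qr.colsPermutation() * wa1;    // basic least-squares solution

        std::cout << "rank = " << rank
                  << ", residual = " << (A * x - b).norm() << "\n";
        return 0;
    }

The rest of ei_lmpar2 then adjusts par until the scaled step diag.cwiseProduct(x) fits the trust radius delta, reusing ei_qrsolv for each damped solve.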
diff --git a/unsupported/Eigen/src/NonLinearOptimization/qform.h b/unsupported/Eigen/src/NonLinearOptimization/qform.h
deleted file mode 100644
index 47bbd1fbe..000000000
--- a/unsupported/Eigen/src/NonLinearOptimization/qform.h
+++ /dev/null
@@ -1,89 +0,0 @@
-
-template <typename Scalar>
-void ei_qform(int m, int n, Scalar *q, int
- ldq, Scalar *wa)
-{
- /* System generated locals */
- int q_dim1, q_offset;
-
- /* Local variables */
- int i, j, k, l, jm1, np1;
- Scalar sum, temp;
- int minmn;
-
- /* Parameter adjustments */
- --wa;
- q_dim1 = ldq;
- q_offset = 1 + q_dim1 * 1;
- q -= q_offset;
-
- /* Function Body */
-
- /* zero out upper triangle of q in the first min(m,n) columns. */
-
- minmn = std::min(m,n);
- if (minmn < 2) {
- goto L30;
- }
- for (j = 2; j <= minmn; ++j) {
- jm1 = j - 1;
- for (i = 1; i <= jm1; ++i) {
- q[i + j * q_dim1] = 0.;
- /* L10: */
- }
- /* L20: */
- }
-L30:
-
- /* initialize remaining columns to those of the identity matrix. */
-
- np1 = n + 1;
- if (m < np1) {
- goto L60;
- }
- for (j = np1; j <= m; ++j) {
- for (i = 1; i <= m; ++i) {
- q[i + j * q_dim1] = 0.;
- /* L40: */
- }
- q[j + j * q_dim1] = 1.;
- /* L50: */
- }
-L60:
-
- /* accumulate q from its factored form. */
-
- for (l = 1; l <= minmn; ++l) {
- k = minmn - l + 1;
- for (i = k; i <= m; ++i) {
- wa[i] = q[i + k * q_dim1];
- q[i + k * q_dim1] = 0.;
- /* L70: */
- }
- q[k + k * q_dim1] = 1.;
- if (wa[k] == 0.) {
- goto L110;
- }
- for (j = k; j <= m; ++j) {
- sum = 0.;
- for (i = k; i <= m; ++i) {
- sum += q[i + j * q_dim1] * wa[i];
- /* L80: */
- }
- temp = sum / wa[k];
- for (i = k; i <= m; ++i) {
- q[i + j * q_dim1] -= temp * wa[i];
- /* L90: */
- }
- /* L100: */
- }
-L110:
- /* L120: */
- ;
- }
- return;
-
- /* last card of subroutine qform. */
-
-} /* qform_ */
-
diff --git a/unsupported/Eigen/src/NonLinearOptimization/qrfac.h b/unsupported/Eigen/src/NonLinearOptimization/qrfac.h
deleted file mode 100644
index 0c1ecf394..000000000
--- a/unsupported/Eigen/src/NonLinearOptimization/qrfac.h
+++ /dev/null
@@ -1,131 +0,0 @@
-
-template <typename Scalar>
-void ei_qrfac(int m, int n, Scalar *a, int
- lda, int pivot, int *ipvt, Scalar *rdiag)
-{
- /* System generated locals */
- int a_dim1, a_offset;
-
- /* Local variables */
- int i, j, k, jp1;
- Scalar sum;
- int kmax;
- Scalar temp;
- int minmn;
- Scalar ajnorm;
-
- Matrix< Scalar, Dynamic, 1 > wa(n+1);
-
- /* Parameter adjustments */
- --rdiag;
- a_dim1 = lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --ipvt;
-
- /* Function Body */
- const Scalar epsmch = epsilon<Scalar>();
-
- /* compute the initial column norms and initialize several arrays. */
-
- for (j = 1; j <= n; ++j) {
- rdiag[j] = Map< Matrix< Scalar, Dynamic, 1 > >(&a[j * a_dim1 + 1],m).blueNorm();
- wa[j] = rdiag[j];
- if (pivot)
- ipvt[j] = j;
- }
-
- /* reduce a to r with householder transformations. */
-
- minmn = std::min(m,n);
- for (j = 1; j <= minmn; ++j) {
- if (! (pivot)) {
- goto L40;
- }
-
- /* bring the column of largest norm into the pivot position. */
-
- kmax = j;
- for (k = j; k <= n; ++k) {
- if (rdiag[k] > rdiag[kmax]) {
- kmax = k;
- }
- /* L20: */
- }
- if (kmax == j) {
- goto L40;
- }
- for (i = 1; i <= m; ++i) {
- temp = a[i + j * a_dim1];
- a[i + j * a_dim1] = a[i + kmax * a_dim1];
- a[i + kmax * a_dim1] = temp;
- /* L30: */
- }
- rdiag[kmax] = rdiag[j];
- wa[kmax] = wa[j];
- k = ipvt[j];
- ipvt[j] = ipvt[kmax];
- ipvt[kmax] = k;
-L40:
-
- /* compute the householder transformation to reduce the */
- /* j-th column of a to a multiple of the j-th unit vector. */
-
- ajnorm = Map< Matrix< Scalar, Dynamic, 1 > >(&a[j + j * a_dim1],m-j+1).blueNorm();
- if (ajnorm == 0.) {
- goto L100;
- }
- if (a[j + j * a_dim1] < 0.) {
- ajnorm = -ajnorm;
- }
- for (i = j; i <= m; ++i) {
- a[i + j * a_dim1] /= ajnorm;
- /* L50: */
- }
- a[j + j * a_dim1] += 1.;
-
- /* apply the transformation to the remaining columns */
- /* and update the norms. */
-
- jp1 = j + 1;
- if (n < jp1) {
- goto L100;
- }
- for (k = jp1; k <= n; ++k) {
- sum = 0.;
- for (i = j; i <= m; ++i) {
- sum += a[i + j * a_dim1] * a[i + k * a_dim1];
- /* L60: */
- }
- temp = sum / a[j + j * a_dim1];
- for (i = j; i <= m; ++i) {
- a[i + k * a_dim1] -= temp * a[i + j * a_dim1];
- /* L70: */
- }
- if (! (pivot) || rdiag[k] == 0.) {
- goto L80;
- }
- temp = a[j + k * a_dim1] / rdiag[k];
- /* Computing MAX */
- /* Computing 2nd power */
- rdiag[k] *= ei_sqrt((std::max(Scalar(0.), Scalar(1.)-ei_abs2(temp))));
- /* Computing 2nd power */
- if (Scalar(.05) * ei_abs2(rdiag[k] / wa[k]) > epsmch) {
- goto L80;
- }
- rdiag[k] = Map< Matrix< Scalar, Dynamic, 1 > >(&a[jp1 + k * a_dim1],m-j).blueNorm();
- wa[k] = rdiag[k];
-L80:
- /* L90: */
- ;
- }
-L100:
- rdiag[j] = -ajnorm;
- /* L110: */
- }
- return;
-
- /* last card of subroutine qrfac. */
-
-} /* qrfac_ */
-
diff --git a/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
index 1ee21d5ed..f89a5f9a8 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
@@ -1,8 +1,10 @@
+// TODO : once qrsolv2 is removed, use ColPivHouseholderQR or PermutationMatrix instead of ipvt
template <typename Scalar>
void ei_qrsolv(
- Matrix< Scalar, Dynamic, Dynamic > &r,
- VectorXi &ipvt, // TODO : const once ipvt mess fixed
+ Matrix< Scalar, Dynamic, Dynamic > &s,
+ // TODO : use a PermutationMatrix once ei_lmpar is no more:
+ const VectorXi &ipvt,
const Matrix< Scalar, Dynamic, 1 > &diag,
const Matrix< Scalar, Dynamic, 1 > &qtb,
Matrix< Scalar, Dynamic, 1 > &x,
@@ -11,80 +13,69 @@ void ei_qrsolv(
{
/* Local variables */
int i, j, k, l;
- Scalar sum, temp;
- int n = r.cols();
+ Scalar temp;
+ int n = s.cols();
Matrix< Scalar, Dynamic, 1 > wa(n);
+ PlanarRotation<Scalar> givens;
/* Function Body */
+ // the following will only change the lower triangular part of s, including
+ // the diagonal, though the diagonal is restored afterward
/* copy r and (q transpose)*b to preserve input and initialize s. */
/* in particular, save the diagonal elements of r in x. */
-
- x = r.diagonal();
+ x = s.diagonal();
wa = qtb;
- for (j = 0; j < n; ++j)
- for (i = j+1; i < n; ++i)
- r(i,j) = r(j,i);
+ s.corner(TopLeft,n,n).template triangularView<StrictlyLower>() = s.corner(TopLeft,n,n).transpose();
/* eliminate the diagonal matrix d using a givens rotation. */
for (j = 0; j < n; ++j) {
/* prepare the row of d to be eliminated, locating the */
/* diagonal element using p from the qr factorization. */
-
l = ipvt[j];
if (diag[l] == 0.)
break;
- sdiag.segment(j,n-j).setZero();
+ sdiag.tail(n-j).setZero();
sdiag[j] = diag[l];
/* the transformations to eliminate the row of d */
/* modify only a single element of (q transpose)*b */
/* beyond the first n, which is initially zero. */
-
Scalar qtbpj = 0.;
for (k = j; k < n; ++k) {
/* determine a givens rotation which eliminates the */
/* appropriate element in the current row of d. */
- PlanarRotation<Scalar> givens;
- givens.makeGivens(-r(k,k), sdiag[k]);
+ givens.makeGivens(-s(k,k), sdiag[k]);
/* compute the modified diagonal element of r and */
/* the modified element of ((q transpose)*b,0). */
-
- r(k,k) = givens.c() * r(k,k) + givens.s() * sdiag[k];
+ s(k,k) = givens.c() * s(k,k) + givens.s() * sdiag[k];
temp = givens.c() * wa[k] + givens.s() * qtbpj;
qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj;
wa[k] = temp;
/* accumulate the transformation in the row of s. */
for (i = k+1; i<n; ++i) {
- temp = givens.c() * r(i,k) + givens.s() * sdiag[i];
- sdiag[i] = -givens.s() * r(i,k) + givens.c() * sdiag[i];
- r(i,k) = temp;
+ temp = givens.c() * s(i,k) + givens.s() * sdiag[i];
+ sdiag[i] = -givens.s() * s(i,k) + givens.c() * sdiag[i];
+ s(i,k) = temp;
}
}
}
- // restore
- sdiag = r.diagonal();
- r.diagonal() = x;
-
/* solve the triangular system for z. if the system is */
/* singular, then obtain a least squares solution. */
-
int nsing;
for (nsing=0; nsing<n && sdiag[nsing]!=0; nsing++);
- wa.segment(nsing,n-nsing).setZero();
- nsing--; // nsing is the last nonsingular index
- for (j = nsing; j>=0; j--) {
- sum = 0.;
- for (i = j+1; i <= nsing; ++i)
- sum += r(i,j) * wa[i];
- wa[j] = (wa[j] - sum) / sdiag[j];
- }
+ wa.tail(n-nsing).setZero();
+ s.corner(TopLeft, nsing, nsing).transpose().template triangularView<Upper>().solveInPlace(wa.head(nsing));
+
+ // restore
+ sdiag = s.diagonal();
+ s.diagonal() = x;
/* permute the components of z back to components of x. */
for (j = 0; j < n; ++j) x[ipvt[j]] = wa[j];
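
For reference, a minimal standalone sketch (not part of this changeset; placing PlanarRotation in <Eigen/Jacobi> is an assumption) of the two operations the rewritten ei_qrsolv delegates to Eigen: a Givens rotation built with makeGivens(-x, y), which zeroes the combination -s*x + c*y, and an in-place triangular solve that replaces the hand-written back substitution.

#include <Eigen/Core>
#include <Eigen/Jacobi>   // assumed home of PlanarRotation in this tree
#include <iostream>

int main()
{
  using namespace Eigen;

  // Givens elimination, following the convention used in the hunk above:
  // after makeGivens(-x, y), the combination -s*x + c*y vanishes.
  double x = 3., y = 4.;
  PlanarRotation<double> g;
  g.makeGivens(-x, y);
  std::cout << "updated    = " <<  g.c()*x + g.s()*y << "\n";  // magnitude sqrt(x*x + y*y)
  std::cout << "eliminated = " << -g.s()*x + g.c()*y << "\n";  // ~0

  // In-place triangular solve, replacing an explicit back-substitution loop:
  // only the upper triangular part of A is read.
  int n = 5;
  MatrixXd A = MatrixXd::Random(n,n) + 3.0*MatrixXd::Identity(n,n);
  VectorXd z = VectorXd::Random(n);
  A.triangularView<Upper>().solveInPlace(z);  // z now solves U*z = old z
  return 0;
}
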
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h b/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
index 70a6d30c3..855cb7a1b 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
@@ -1,60 +1,22 @@
+// TODO : move this to GivensQR once there's such a thing in Eigen
+
template <typename Scalar>
-void ei_r1mpyq(int m, int n, Scalar *a, int
- lda, const Scalar *v, const Scalar *w)
+void ei_r1mpyq(int m, int n, Scalar *a, const std::vector<PlanarRotation<Scalar> > &v_givens, const std::vector<PlanarRotation<Scalar> > &w_givens)
{
- /* System generated locals */
- int a_dim1, a_offset;
-
- /* Local variables */
- int i, j, nm1, nmj;
- Scalar cos__=0., sin__=0., temp;
-
- /* Parameter adjustments */
- --w;
- --v;
- a_dim1 = lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
-
- /* Function Body */
- nm1 = n - 1;
- if (nm1 < 1)
- return;
-
/* apply the first set of givens rotations to a. */
- for (nmj = 1; nmj <= nm1; ++nmj) {
- j = n - nmj;
- if (ei_abs(v[j]) > 1.) {
- cos__ = 1. / v[j];
- sin__ = ei_sqrt(1. - ei_abs2(cos__));
- } else {
- sin__ = v[j];
- cos__ = ei_sqrt(1. - ei_abs2(sin__));
- }
- for (i = 1; i <= m; ++i) {
- temp = cos__ * a[i + j * a_dim1] - sin__ * a[i + n * a_dim1];
- a[i + n * a_dim1] = sin__ * a[i + j * a_dim1] + cos__ * a[
- i + n * a_dim1];
- a[i + j * a_dim1] = temp;
+ for (int j = n-2; j>=0; --j)
+ for (int i = 0; i<m; ++i) {
+ Scalar temp = v_givens[j].c() * a[i+m*j] - v_givens[j].s() * a[i+m*(n-1)];
+ a[i+m*(n-1)] = v_givens[j].s() * a[i+m*j] + v_givens[j].c() * a[i+m*(n-1)];
+ a[i+m*j] = temp;
}
- }
/* apply the second set of givens rotations to a. */
- for (j = 1; j <= nm1; ++j) {
- if (ei_abs(w[j]) > 1.) {
- cos__ = 1. / w[j];
- sin__ = ei_sqrt(1. - ei_abs2(cos__));
- } else {
- sin__ = w[j];
- cos__ = ei_sqrt(1. - ei_abs2(sin__));
- }
- for (i = 1; i <= m; ++i) {
- temp = cos__ * a[i + j * a_dim1] + sin__ * a[i + n * a_dim1];
- a[i + n * a_dim1] = -sin__ * a[i + j * a_dim1] + cos__ * a[
- i + n * a_dim1];
- a[i + j * a_dim1] = temp;
+ for (int j = 0; j<n-1; ++j)
+ for (int i = 0; i<m; ++i) {
+ Scalar temp = w_givens[j].c() * a[i+m*j] + w_givens[j].s() * a[i+m*(n-1)];
+ a[i+m*(n-1)] = -w_givens[j].s() * a[i+m*j] + w_givens[j].c() * a[i+m*(n-1)];
+ a[i+m*j] = temp;
}
- }
- return;
-} /* r1mpyq_ */
+}
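
The raw a[i + m*j] indexing above is plain column-major storage, so the same sweep can be written against an Eigen Map view. A hypothetical equivalent of the first rotation loop (illustrative only; first_sweep is not a name from this tree):

#include <Eigen/Core>
#include <Eigen/Jacobi>   // assumed home of PlanarRotation, as above
#include <vector>
using namespace Eigen;

// For a column-major m x n array, a[i + m*j] is exactly A(i,j).
void first_sweep(int m, int n, double* a,
                 const std::vector<PlanarRotation<double> > &v_givens)
{
  Map<MatrixXd> A(a, m, n);
  for (int j = n-2; j >= 0; --j)
    for (int i = 0; i < m; ++i) {
      double temp = v_givens[j].c() * A(i,j) - v_givens[j].s() * A(i,n-1);
      A(i,n-1)    = v_givens[j].s() * A(i,j) + v_givens[j].c() * A(i,n-1);
      A(i,j)      = temp;
    }
}
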
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
index f2ddb189b..3d8978837 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
@@ -1,175 +1,88 @@
- template <typename Scalar>
-void ei_r1updt(int m, int n, Scalar *s, int /* ls */, const Scalar *u, Scalar *v, Scalar *w, bool *sing)
+template <typename Scalar>
+void ei_r1updt(
+ Matrix< Scalar, Dynamic, Dynamic > &s,
+ const Matrix< Scalar, Dynamic, 1> &u,
+ std::vector<PlanarRotation<Scalar> > &v_givens,
+ std::vector<PlanarRotation<Scalar> > &w_givens,
+ Matrix< Scalar, Dynamic, 1> &v,
+ Matrix< Scalar, Dynamic, 1> &w,
+ bool *sing)
{
/* Local variables */
- int i, j, l, jj, nm1;
- Scalar tan__;
- int nmj;
- Scalar cos__, sin__, tau, temp, cotan;
-
- /* Parameter adjustments */
- --w;
- --u;
- --v;
- --s;
-
- /* Function Body */
- const Scalar giant = std::numeric_limits<Scalar>::max();
-
- /* initialize the diagonal element pointer. */
-
- jj = n * ((m << 1) - n + 1) / 2 - (m - n);
-
- /* move the nontrivial part of the last column of s into w. */
-
- l = jj;
- for (i = n; i <= m; ++i) {
- w[i] = s[l];
- ++l;
- /* L10: */
- }
-
- /* rotate the vector v into a multiple of the n-th unit vector */
- /* in such a way that a spike is introduced into w. */
-
- nm1 = n - 1;
- if (nm1 < 1) {
- goto L70;
- }
- for (nmj = 1; nmj <= nm1; ++nmj) {
- j = n - nmj;
- jj -= m - j + 1;
+ const int m = s.rows();
+ const int n = s.cols();
+ int i, j=1;
+ Scalar temp;
+ PlanarRotation<Scalar> givens;
+
+    // ei_r1updt had a broader use case, but we don't use it here. And, more
+    // importantly, we cannot test it.
+ assert(m==n);
+ assert(u.size()==m);
+ assert(v.size()==n);
+ assert(w.size()==n);
+
+ /* move the nontrivial part of the last column of s into w. */
+ w[n-1] = s(n-1,n-1);
+
+ /* rotate the vector v into a multiple of the n-th unit vector */
+ /* in such a way that a spike is introduced into w. */
+ for (j=n-2; j>=0; --j) {
w[j] = 0.;
- if (v[j] == 0.) {
- goto L50;
- }
-
- /* determine a givens rotation which eliminates the */
- /* j-th element of v. */
-
- if (ei_abs(v[n]) >= ei_abs(v[j]))
- goto L20;
- cotan = v[n] / v[j];
- /* Computing 2nd power */
- sin__ = Scalar(.5) / ei_sqrt(Scalar(0.25) + Scalar(0.25) * ei_abs2(cotan));
- cos__ = sin__ * cotan;
- tau = 1.;
- if (ei_abs(cos__) * giant > 1.) {
- tau = 1. / cos__;
- }
- goto L30;
-L20:
- tan__ = v[j] / v[n];
- /* Computing 2nd power */
- cos__ = Scalar(.5) / ei_sqrt(Scalar(0.25) + Scalar(0.25) * ei_abs2(tan__));
- sin__ = cos__ * tan__;
- tau = sin__;
-L30:
-
- /* apply the transformation to v and store the information */
- /* necessary to recover the givens rotation. */
-
- v[n] = sin__ * v[j] + cos__ * v[n];
- v[j] = tau;
-
- /* apply the transformation to s and extend the spike in w. */
-
- l = jj;
- for (i = j; i <= m; ++i) {
- temp = cos__ * s[l] - sin__ * w[i];
- w[i] = sin__ * s[l] + cos__ * w[i];
- s[l] = temp;
- ++l;
- /* L40: */
+ if (v[j] != 0.) {
+ /* determine a givens rotation which eliminates the */
+ /* j-th element of v. */
+ givens.makeGivens(-v[n-1], v[j]);
+
+ /* apply the transformation to v and store the information */
+ /* necessary to recover the givens rotation. */
+ v[n-1] = givens.s() * v[j] + givens.c() * v[n-1];
+ v_givens[j] = givens;
+
+ /* apply the transformation to s and extend the spike in w. */
+ for (i = j; i < m; ++i) {
+ temp = givens.c() * s(j,i) - givens.s() * w[i];
+ w[i] = givens.s() * s(j,i) + givens.c() * w[i];
+ s(j,i) = temp;
+ }
}
-L50:
- /* L60: */
- ;
}
-L70:
- /* add the spike from the rank 1 update to w. */
-
- for (i = 1; i <= m; ++i) {
- w[i] += v[n] * u[i];
- /* L80: */
- }
-
- /* eliminate the spike. */
+ /* add the spike from the rank 1 update to w. */
+ w += v[n-1] * u;
+ /* eliminate the spike. */
*sing = false;
- if (nm1 < 1) {
- goto L140;
- }
- for (j = 1; j <= nm1; ++j) {
- if (w[j] == 0.) {
- goto L120;
+ for (j = 0; j < n-1; ++j) {
+ if (w[j] != 0.) {
+ /* determine a givens rotation which eliminates the */
+ /* j-th element of the spike. */
+ givens.makeGivens(-s(j,j), w[j]);
+
+ /* apply the transformation to s and reduce the spike in w. */
+ for (i = j; i < m; ++i) {
+ temp = givens.c() * s(j,i) + givens.s() * w[i];
+ w[i] = -givens.s() * s(j,i) + givens.c() * w[i];
+ s(j,i) = temp;
+ }
+
+ /* store the information necessary to recover the */
+ /* givens rotation. */
+ w_givens[j] = givens;
}
- /* determine a givens rotation which eliminates the */
- /* j-th element of the spike. */
-
- if (ei_abs(s[jj]) >= ei_abs(w[j]))
- goto L90;
- cotan = s[jj] / w[j];
- /* Computing 2nd power */
- sin__ = Scalar(.5) / ei_sqrt(Scalar(0.25) + Scalar(0.25) * ei_abs2(cotan));
- cos__ = sin__ * cotan;
- tau = 1.;
- if (ei_abs(cos__) * giant > 1.) {
- tau = 1. / cos__;
- }
- goto L100;
-L90:
- tan__ = w[j] / s[jj];
- /* Computing 2nd power */
- cos__ = Scalar(.5) / ei_sqrt(Scalar(0.25) + Scalar(0.25) * ei_abs2(tan__));
- sin__ = cos__ * tan__;
- tau = sin__;
-L100:
-
- /* apply the transformation to s and reduce the spike in w. */
-
- l = jj;
- for (i = j; i <= m; ++i) {
- temp = cos__ * s[l] + sin__ * w[i];
- w[i] = -sin__ * s[l] + cos__ * w[i];
- s[l] = temp;
- ++l;
- /* L110: */
- }
-
- /* store the information necessary to recover the */
- /* givens rotation. */
-
- w[j] = tau;
-L120:
-
- /* test for zero diagonal elements in the output s. */
-
- if (s[jj] == 0.) {
+ /* test for zero diagonal elements in the output s. */
+ if (s(j,j) == 0.) {
*sing = true;
}
- jj += m - j + 1;
- /* L130: */
}
-L140:
+ /* move w back into the last column of the output s. */
+ s(n-1,n-1) = w[n-1];
- /* move w back into the last column of the output s. */
-
- l = jj;
- for (i = n; i <= m; ++i) {
- s[l] = w[i];
- ++l;
- /* L150: */
- }
- if (s[jj] == 0.) {
+ if (s(j,j) == 0.) {
*sing = true;
}
return;
-
- /* last card of subroutine r1updt. */
-
-} /* r1updt_ */
+}
diff --git a/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
index 6be292f6a..7703ff8de 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
@@ -1,80 +1,41 @@
template <typename Scalar>
-void ei_rwupdt(int n, Scalar *r__, int ldr,
- const Scalar *w, Scalar *b, Scalar *alpha, Scalar *cos__,
- Scalar *sin__)
+void ei_rwupdt(
+ Matrix< Scalar, Dynamic, Dynamic > &r,
+ const Matrix< Scalar, Dynamic, 1> &w,
+ Matrix< Scalar, Dynamic, 1> &b,
+ Scalar alpha)
{
- /* System generated locals */
- int r_dim1, r_offset;
+ const int n = r.cols();
+ assert(r.rows()>=n);
+ std::vector<PlanarRotation<Scalar> > givens(n);
/* Local variables */
- int i, j, jm1;
- Scalar tan__, temp, rowj, cotan;
-
- /* Parameter adjustments */
- --sin__;
- --cos__;
- --b;
- --w;
- r_dim1 = ldr;
- r_offset = 1 + r_dim1 * 1;
- r__ -= r_offset;
+ Scalar temp, rowj;
/* Function Body */
-
- for (j = 1; j <= n; ++j) {
- rowj = w[j];
- jm1 = j - 1;
-
-/* apply the previous transformations to */
-/* r(i,j), i=1,2,...,j-1, and to w(j). */
-
- if (jm1 < 1) {
- goto L20;
- }
- for (i = 1; i <= jm1; ++i) {
- temp = cos__[i] * r__[i + j * r_dim1] + sin__[i] * rowj;
- rowj = -sin__[i] * r__[i + j * r_dim1] + cos__[i] * rowj;
- r__[i + j * r_dim1] = temp;
-/* L10: */
- }
-L20:
-
-/* determine a givens rotation which eliminates w(j). */
-
- cos__[j] = 1.;
- sin__[j] = 0.;
- if (rowj == 0.) {
- goto L50;
- }
- if (ei_abs(r__[j + j * r_dim1]) >= ei_abs(rowj))
- goto L30;
- cotan = r__[j + j * r_dim1] / rowj;
-/* Computing 2nd power */
- sin__[j] = Scalar(.5) / ei_sqrt(Scalar(0.25) + Scalar(0.25) * ei_abs2(cotan));
- cos__[j] = sin__[j] * cotan;
- goto L40;
-L30:
- tan__ = rowj / r__[j + j * r_dim1];
-/* Computing 2nd power */
- cos__[j] = Scalar(.5) / ei_sqrt(Scalar(0.25) + Scalar(0.25) * ei_abs2(tan__));
- sin__[j] = cos__[j] * tan__;
-L40:
-
-/* apply the current transformation to r(j,j), b(j), and alpha. */
-
- r__[j + j * r_dim1] = cos__[j] * r__[j + j * r_dim1] + sin__[j] *
- rowj;
- temp = cos__[j] * b[j] + sin__[j] * *alpha;
- *alpha = -sin__[j] * b[j] + cos__[j] * *alpha;
- b[j] = temp;
-L50:
-/* L60: */
- ;
+ for (int j = 0; j < n; ++j) {
+ rowj = w[j];
+
+ /* apply the previous transformations to */
+ /* r(i,j), i=0,1,...,j-1, and to w(j). */
+ for (int i = 0; i < j; ++i) {
+ temp = givens[i].c() * r(i,j) + givens[i].s() * rowj;
+ rowj = -givens[i].s() * r(i,j) + givens[i].c() * rowj;
+ r(i,j) = temp;
+ }
+
+ if (rowj == 0.)
+ continue;
+
+ /* determine a givens rotation which eliminates w(j). */
+ givens[j].makeGivens(-r(j,j), rowj);
+
+ /* apply the current transformation to r(j,j), b(j), and alpha. */
+ r(j,j) = givens[j].c() * r(j,j) + givens[j].s() * rowj;
+ temp = givens[j].c() * b[j] + givens[j].s() * alpha;
+ alpha = -givens[j].s() * b[j] + givens[j].c() * alpha;
+ b[j] = temp;
}
- return;
-
-/* last card of subroutine rwupdt. */
-
-} /* rwupdt_ */
+}
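
ei_rwupdt is the classic update of a QR factor when one extra row w^T is appended. A self-contained sketch of the same rotation pattern (independent of the routine above; names are illustrative and the PlanarRotation header location is assumed), together with the identity R^T R == R0^T R0 + w w^T that orthogonality of the rotations preserves:

#include <Eigen/Core>
#include <Eigen/Jacobi>
#include <iostream>
#include <vector>
using namespace Eigen;

int main()
{
  const int n = 4;
  MatrixXd R = MatrixXd::Random(n, n);
  for (int i = 0; i < n; ++i)            // keep only the upper triangle
    for (int j = 0; j < i; ++j)
      R(i, j) = 0.0;
  VectorXd w = VectorXd::Random(n);      // the appended row, stored as a column vector
  MatrixXd R0 = R;                       // original factor, kept for the check

  std::vector<PlanarRotation<double> > givens(n);
  for (int j = 0; j < n; ++j) {
    double rowj = w[j];
    for (int i = 0; i < j; ++i) {        // apply the previously generated rotations
      double t = givens[i].c()*R(i,j) + givens[i].s()*rowj;
      rowj     = -givens[i].s()*R(i,j) + givens[i].c()*rowj;
      R(i,j)   = t;
    }
    givens[j].makeGivens(-R(j,j), rowj); // eliminate w(j) against r(j,j)
    R(j,j) = givens[j].c()*R(j,j) + givens[j].s()*rowj;
  }
  // Orthogonality of the rotations gives R^T R == R0^T R0 + w w^T (up to rounding).
  std::cout << (R.transpose()*R - R0.transpose()*R0 - w*w.transpose()).norm() << "\n";
  return 0;
}
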
diff --git a/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
index db6f791df..8d23cb4ae 100644
--- a/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
+++ b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
@@ -80,7 +80,7 @@ public:
Scalar h;
int nfev=0;
const int n = _x.size();
- const Scalar eps = ei_sqrt((std::max(epsfcn,epsilon<Scalar>() )));
+ const Scalar eps = ei_sqrt((std::max(epsfcn,NumTraits<Scalar>::epsilon() )));
ValueType val1, val2;
InputType x = _x;
// TODO : we should do this only if the size is not already known
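
The eps computed above is the square root of the machine precision, or of the user-supplied epsfcn if that is larger. A generic forward-difference sketch (not this module's exact code) showing why a step of roughly sqrt(epsilon)*|x| is used:

#include <algorithm>
#include <cmath>
#include <iostream>
#include <limits>

// A step h ~ sqrt(machine epsilon) * |x| roughly balances the truncation error
// of the forward difference against floating-point cancellation in f(x+h)-f(x).
static double cube(double x) { return x*x*x; }

static double forward_diff(double (*f)(double), double x, double epsfcn = 0.)
{
  const double eps = std::sqrt(std::max(epsfcn, std::numeric_limits<double>::epsilon()));
  double h = eps * std::abs(x);
  if (h == 0.)
    h = eps;
  return (f(x + h) - f(x)) / h;
}

int main()
{
  std::cout << forward_diff(cube, 2.0) << " vs exact " << 3.0*2.0*2.0 << "\n";
  return 0;
}
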
diff --git a/unsupported/doc/examples/MatrixExponential.cpp b/unsupported/doc/examples/MatrixExponential.cpp
index 7dc2396df..801ee4d01 100644
--- a/unsupported/doc/examples/MatrixExponential.cpp
+++ b/unsupported/doc/examples/MatrixExponential.cpp
@@ -12,7 +12,6 @@ int main()
0, 0, 0;
std::cout << "The matrix A is:\n" << A << "\n\n";
- MatrixXd B;
- ei_matrix_exponential(A, &B);
+ MatrixXd B = ei_matrix_exponential(A);
std::cout << "The matrix exponential of A is:\n" << B << "\n\n";
}
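
As a sanity check of the example above: a skew-symmetric generator with angle pi/4 exponentiates to a plane rotation, so the expected result is a cos/sin block plus a trailing 1. A small standalone check using the same return-by-value call (header paths assumed, as in the example):

#include <Eigen/Core>
#include <unsupported/Eigen/MatrixFunctions>
#include <cmath>
#include <iostream>
using namespace Eigen;

int main()
{
  const double t = std::acos(-1.0) / 4;  // pi/4
  MatrixXd A(3,3);
  A << 0, -t, 0,
       t,  0, 0,
       0,  0, 0;
  MatrixXd expected(3,3);
  expected << std::cos(t), -std::sin(t), 0,
              std::sin(t),  std::cos(t), 0,
              0,            0,           1;
  MatrixXd B = ei_matrix_exponential(A);
  std::cout << (B - expected).norm() << "\n";  // ~0
  return 0;
}
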
diff --git a/unsupported/doc/examples/MatrixFunction.cpp b/unsupported/doc/examples/MatrixFunction.cpp
index c11cb821b..075fe7361 100644
--- a/unsupported/doc/examples/MatrixFunction.cpp
+++ b/unsupported/doc/examples/MatrixFunction.cpp
@@ -15,9 +15,8 @@ int main()
A << 0, -pi/4, 0,
pi/4, 0, 0,
0, 0, 0;
- std::cout << "The matrix A is:\n" << A << "\n\n";
- MatrixXd B;
- ei_matrix_function(A, expfn, &B);
- std::cout << "The matrix exponential of A is:\n" << B << "\n\n";
+ std::cout << "The matrix A is:\n" << A << "\n\n";
+ std::cout << "The matrix exponential of A is:\n"
+ << ei_matrix_function(A, expfn) << "\n\n";
}
diff --git a/unsupported/doc/examples/MatrixSine.cpp b/unsupported/doc/examples/MatrixSine.cpp
index f8780ac92..2bbf99bbb 100644
--- a/unsupported/doc/examples/MatrixSine.cpp
+++ b/unsupported/doc/examples/MatrixSine.cpp
@@ -7,12 +7,10 @@ int main()
MatrixXd A = MatrixXd::Random(3,3);
std::cout << "A = \n" << A << "\n\n";
- MatrixXd sinA;
- ei_matrix_sin(A, &sinA);
+ MatrixXd sinA = ei_matrix_sin(A);
std::cout << "sin(A) = \n" << sinA << "\n\n";
- MatrixXd cosA;
- ei_matrix_cos(A, &cosA);
+ MatrixXd cosA = ei_matrix_cos(A);
std::cout << "cos(A) = \n" << cosA << "\n\n";
// The matrix functions satisfy sin^2(A) + cos^2(A) = I,
diff --git a/unsupported/doc/examples/MatrixSinh.cpp b/unsupported/doc/examples/MatrixSinh.cpp
index 488d95652..036534dea 100644
--- a/unsupported/doc/examples/MatrixSinh.cpp
+++ b/unsupported/doc/examples/MatrixSinh.cpp
@@ -7,12 +7,10 @@ int main()
MatrixXf A = MatrixXf::Random(3,3);
std::cout << "A = \n" << A << "\n\n";
- MatrixXf sinhA;
- ei_matrix_sinh(A, &sinhA);
+ MatrixXf sinhA = ei_matrix_sinh(A);
std::cout << "sinh(A) = \n" << sinhA << "\n\n";
- MatrixXf coshA;
- ei_matrix_cosh(A, &coshA);
+ MatrixXf coshA = ei_matrix_cosh(A);
std::cout << "cosh(A) = \n" << coshA << "\n\n";
// The matrix functions satisfy cosh^2(A) - sinh^2(A) = I,
diff --git a/unsupported/test/FFT.cpp b/unsupported/test/FFT.cpp
index 02cd5a48f..45c87f5a7 100644
--- a/unsupported/test/FFT.cpp
+++ b/unsupported/test/FFT.cpp
@@ -1,245 +1,2 @@
-#if 0
-
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2009 Mark Borgerding mark a borgerding net
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#include "main.h"
-#include <unsupported/Eigen/FFT>
-
-template <typename T>
-std::complex<T> RandomCpx() { return std::complex<T>( (T)(rand()/(T)RAND_MAX - .5), (T)(rand()/(T)RAND_MAX - .5) ); }
-
-using namespace std;
-using namespace Eigen;
-
-float norm(float x) {return x*x;}
-double norm(double x) {return x*x;}
-long double norm(long double x) {return x*x;}
-
-template < typename T>
-complex<long double> promote(complex<T> x) { return complex<long double>(x.real(),x.imag()); }
-
-complex<long double> promote(float x) { return complex<long double>( x); }
-complex<long double> promote(double x) { return complex<long double>( x); }
-complex<long double> promote(long double x) { return complex<long double>( x); }
-
-
- template <typename T1,typename T2>
- long double fft_rmse( const vector<T1> & fftbuf,const vector<T2> & timebuf)
- {
- long double totalpower=0;
- long double difpower=0;
- long double pi = acos((long double)-1 );
- for (size_t k0=0;k0<fftbuf.size();++k0) {
- complex<long double> acc = 0;
- long double phinc = -2.*k0* pi / timebuf.size();
- for (size_t k1=0;k1<timebuf.size();++k1) {
- acc += promote( timebuf[k1] ) * exp( complex<long double>(0,k1*phinc) );
- }
- totalpower += norm(acc);
- complex<long double> x = promote(fftbuf[k0]);
- complex<long double> dif = acc - x;
- difpower += norm(dif);
- cerr << k0 << "\t" << acc << "\t" << x << "\t" << sqrt(norm(dif)) << endl;
- }
- cerr << "rmse:" << sqrt(difpower/totalpower) << endl;
- return sqrt(difpower/totalpower);
- }
-
- template <typename T1,typename T2>
- long double dif_rmse( const vector<T1> buf1,const vector<T2> buf2)
- {
- long double totalpower=0;
- long double difpower=0;
- size_t n = min( buf1.size(),buf2.size() );
- for (size_t k=0;k<n;++k) {
- totalpower += (norm( buf1[k] ) + norm(buf2[k]) )/2.;
- difpower += norm(buf1[k] - buf2[k]);
- }
- return sqrt(difpower/totalpower);
- }
-
-enum { StdVectorContainer, EigenVectorContainer };
-
-template<int Container, typename Scalar> struct VectorType;
-
-template<typename Scalar> struct VectorType<StdVectorContainer,Scalar>
-{
- typedef vector<Scalar> type;
-};
-
-template<typename Scalar> struct VectorType<EigenVectorContainer,Scalar>
-{
- typedef Matrix<Scalar,Dynamic,1> type;
-};
-
-template <int Container, typename T>
-void test_scalar_generic(int nfft)
-{
- typedef typename FFT<T>::Complex Complex;
- typedef typename FFT<T>::Scalar Scalar;
- typedef typename VectorType<Container,Scalar>::type ScalarVector;
- typedef typename VectorType<Container,Complex>::type ComplexVector;
-
- FFT<T> fft;
- ScalarVector inbuf(nfft);
- ComplexVector outbuf;
- for (int k=0;k<nfft;++k)
- inbuf[k]= (T)(rand()/(double)RAND_MAX - .5);
-
- // make sure it DOESN'T give the right full spectrum answer
- // if we've asked for half-spectrum
- fft.SetFlag(fft.HalfSpectrum );
- fft.fwd( outbuf,inbuf);
- VERIFY(outbuf.size() == (size_t)( (nfft>>1)+1) );
- VERIFY( fft_rmse(outbuf,inbuf) < test_precision<T>() );// gross check
-
- fft.ClearFlag(fft.HalfSpectrum );
- fft.fwd( outbuf,inbuf);
- VERIFY( fft_rmse(outbuf,inbuf) < test_precision<T>() );// gross check
-
- ScalarVector buf3;
- fft.inv( buf3 , outbuf);
- VERIFY( dif_rmse(inbuf,buf3) < test_precision<T>() );// gross check
-
- // verify that the Unscaled flag takes effect
- ComplexVector buf4;
- fft.SetFlag(fft.Unscaled);
- fft.inv( buf4 , outbuf);
- for (int k=0;k<nfft;++k)
- buf4[k] *= T(1./nfft);
- VERIFY( dif_rmse(inbuf,buf4) < test_precision<T>() );// gross check
-
- // verify that ClearFlag works
- fft.ClearFlag(fft.Unscaled);
- fft.inv( buf3 , outbuf);
- VERIFY( dif_rmse(inbuf,buf3) < test_precision<T>() );// gross check
-}
-
-template <typename T>
-void test_scalar(int nfft)
-{
- test_scalar_generic<StdVectorContainer,T>(nfft);
- test_scalar_generic<EigenVectorContainer,T>(nfft);
-}
-
-template <int Container, typename T>
-void test_complex_generic(int nfft)
-{
- typedef typename FFT<T>::Complex Complex;
- typedef typename VectorType<Container,Complex>::type ComplexVector;
-
- FFT<T> fft;
-
- ComplexVector inbuf(nfft);
- ComplexVector outbuf;
- ComplexVector buf3;
- for (int k=0;k<nfft;++k)
- inbuf[k]= Complex( (T)(rand()/(double)RAND_MAX - .5), (T)(rand()/(double)RAND_MAX - .5) );
- fft.fwd( outbuf , inbuf);
-
- VERIFY( fft_rmse(outbuf,inbuf) < test_precision<T>() );// gross check
-
- fft.inv( buf3 , outbuf);
-
- VERIFY( dif_rmse(inbuf,buf3) < test_precision<T>() );// gross check
-
- // verify that the Unscaled flag takes effect
- ComplexVector buf4;
- fft.SetFlag(fft.Unscaled);
- fft.inv( buf4 , outbuf);
- for (int k=0;k<nfft;++k)
- buf4[k] *= T(1./nfft);
- VERIFY( dif_rmse(inbuf,buf4) < test_precision<T>() );// gross check
-
- // verify that ClearFlag works
- fft.ClearFlag(fft.Unscaled);
- fft.inv( buf3 , outbuf);
- VERIFY( dif_rmse(inbuf,buf3) < test_precision<T>() );// gross check
-}
-
-template <typename T>
-void test_complex(int nfft)
-{
- test_complex_generic<StdVectorContainer,T>(nfft);
- test_complex_generic<EigenVectorContainer,T>(nfft);
-}
-
-void test_FFT()
-{
-
- CALL_SUBTEST( test_complex<float>(32) );
- CALL_SUBTEST( test_complex<double>(32) );
- CALL_SUBTEST( test_complex<long double>(32) );
-
- CALL_SUBTEST( test_complex<float>(256) );
- CALL_SUBTEST( test_complex<double>(256) );
- CALL_SUBTEST( test_complex<long double>(256) );
-
- CALL_SUBTEST( test_complex<float>(3*8) );
- CALL_SUBTEST( test_complex<double>(3*8) );
- CALL_SUBTEST( test_complex<long double>(3*8) );
-
- CALL_SUBTEST( test_complex<float>(5*32) );
- CALL_SUBTEST( test_complex<double>(5*32) );
- CALL_SUBTEST( test_complex<long double>(5*32) );
-
- CALL_SUBTEST( test_complex<float>(2*3*4) );
- CALL_SUBTEST( test_complex<double>(2*3*4) );
- CALL_SUBTEST( test_complex<long double>(2*3*4) );
-
- CALL_SUBTEST( test_complex<float>(2*3*4*5) );
- CALL_SUBTEST( test_complex<double>(2*3*4*5) );
- CALL_SUBTEST( test_complex<long double>(2*3*4*5) );
-
- CALL_SUBTEST( test_complex<float>(2*3*4*5*7) );
- CALL_SUBTEST( test_complex<double>(2*3*4*5*7) );
- CALL_SUBTEST( test_complex<long double>(2*3*4*5*7) );
-
-
-
- CALL_SUBTEST( test_scalar<float>(32) );
- CALL_SUBTEST( test_scalar<double>(32) );
- CALL_SUBTEST( test_scalar<long double>(32) );
-
- CALL_SUBTEST( test_scalar<float>(45) );
- CALL_SUBTEST( test_scalar<double>(45) );
- CALL_SUBTEST( test_scalar<long double>(45) );
-
- CALL_SUBTEST( test_scalar<float>(50) );
- CALL_SUBTEST( test_scalar<double>(50) );
- CALL_SUBTEST( test_scalar<long double>(50) );
-
- CALL_SUBTEST( test_scalar<float>(256) );
- CALL_SUBTEST( test_scalar<double>(256) );
- CALL_SUBTEST( test_scalar<long double>(256) );
-
- CALL_SUBTEST( test_scalar<float>(2*3*4*5*7) );
- CALL_SUBTEST( test_scalar<double>(2*3*4*5*7) );
- CALL_SUBTEST( test_scalar<long double>(2*3*4*5*7) );
-}
-#else
#define test_FFTW test_FFT
#include "FFTW.cpp"
-#endif
diff --git a/unsupported/test/FFTW.cpp b/unsupported/test/FFTW.cpp
index 94de53e36..838ddb238 100644
--- a/unsupported/test/FFTW.cpp
+++ b/unsupported/test/FFTW.cpp
@@ -23,7 +23,6 @@
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#include "main.h"
-#include <iostream>
#include <unsupported/Eigen/FFT>
template <typename T>
@@ -107,8 +106,6 @@ void test_scalar_generic(int nfft)
for (int k=0;k<nfft;++k)
tbuf[k]= (T)( rand()/(double)RAND_MAX - .5);
- cout << "tbuf=["; for (size_t i=0;i<(size_t) tbuf.size();++i) {cout << tbuf[i] << " ";} cout << "];\n";
-
// make sure it DOESN'T give the right full spectrum answer
// if we've asked for half-spectrum
fft.SetFlag(fft.HalfSpectrum );
@@ -125,9 +122,7 @@ void test_scalar_generic(int nfft)
return; // odd FFTs get the wrong size inverse FFT
ScalarVector tbuf2;
- cout << "freqBuf=["; for (size_t i=0;i<(size_t) freqBuf.size();++i) {cout << freqBuf[i] << " ";} cout << "];\n";
fft.inv( tbuf2 , freqBuf);
- cout << "tbuf2=["; for (size_t i=0;i<(size_t) tbuf2.size();++i) {cout << tbuf2[i] << " ";} cout << "];\n";
VERIFY( dif_rmse(tbuf,tbuf2) < test_precision<T>() );// gross check
@@ -135,9 +130,7 @@ void test_scalar_generic(int nfft)
ScalarVector tbuf3;
fft.SetFlag(fft.Unscaled);
- cout << "freqBuf=["; for (size_t i=0;i<(size_t) freqBuf.size();++i) {cout << freqBuf[i] << " ";} cout << "];\n";
fft.inv( tbuf3 , freqBuf);
- cout << "tbuf3=["; for (size_t i=0;i<(size_t) tbuf3.size();++i) {cout << tbuf3[i] << " ";} cout << "];\n";
for (int k=0;k<nfft;++k)
tbuf3[k] *= T(1./nfft);
@@ -146,8 +139,6 @@ void test_scalar_generic(int nfft)
//for (size_t i=0;i<(size_t) tbuf.size();++i)
// cout << "freqBuf=" << freqBuf[i] << " in2=" << tbuf3[i] << " - in=" << tbuf[i] << " => " << (tbuf3[i] - tbuf[i] ) << endl;
- cout << "dif_rmse = " << dif_rmse(tbuf,tbuf3) << endl;
- cout << "test_precision = " << test_precision<T>() << endl;
VERIFY( dif_rmse(tbuf,tbuf3) < test_precision<T>() );// gross check
// verify that ClearFlag works
diff --git a/unsupported/test/NonLinearOptimization.cpp b/unsupported/test/NonLinearOptimization.cpp
index ae587f016..1313726a1 100644
--- a/unsupported/test/NonLinearOptimization.cpp
+++ b/unsupported/test/NonLinearOptimization.cpp
@@ -216,7 +216,7 @@ void testLmder()
// check covariance
covfac = fnorm*fnorm/(m-n);
- ei_covar(lm.fjac, lm.ipvt); // TODO : move this as a function of lm
+ ei_covar(lm.fjac, lm.permutation.indices()); // TODO : move this as a function of lm
MatrixXd cov_ref(n,n);
cov_ref <<
@@ -605,7 +605,7 @@ void testLmdif()
// check covariance
covfac = fnorm*fnorm/(m-n);
- ei_covar(lm.fjac, lm.ipvt);
+ ei_covar(lm.fjac, lm.permutation.indices()); // TODO : move this as a function of lm
MatrixXd cov_ref(n,n);
cov_ref <<
@@ -692,8 +692,8 @@ void testNistChwirut2(void)
x<< 0.15, 0.008, 0.010;
// do the computation
lm.resetParameters();
- lm.parameters.ftol = 1.E6*epsilon<double>();
- lm.parameters.xtol = 1.E6*epsilon<double>();
+ lm.parameters.ftol = 1.E6*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E6*NumTraits<double>::epsilon();
info = lm.minimize(x);
// check return value
@@ -1010,7 +1010,7 @@ void testNistLanczos1(void)
VERIFY( 79 == lm.nfev);
VERIFY( 72 == lm.njev);
// check norm^2
- VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.429604433690E-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.430899764097e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
// check x
VERIFY_IS_APPROX(x[0], 9.5100000027E-02 );
VERIFY_IS_APPROX(x[1], 1.0000000001E+00 );
@@ -1031,7 +1031,7 @@ void testNistLanczos1(void)
VERIFY( 9 == lm.nfev);
VERIFY( 8 == lm.njev);
// check norm^2
- VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.43049947737308E-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.428595533845e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
// check x
VERIFY_IS_APPROX(x[0], 9.5100000027E-02 );
VERIFY_IS_APPROX(x[1], 1.0000000001E+00 );
@@ -1170,9 +1170,9 @@ void testNistMGH10(void)
info = lm.minimize(x);
// check return value
- VERIFY( 2 == info);
- VERIFY( 285 == lm.nfev);
- VERIFY( 250 == lm.njev);
+ VERIFY( 2 == info);
+ VERIFY( 284 == lm.nfev);
+ VERIFY( 249 == lm.njev);
// check norm^2
VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7945855171E+01);
// check x
@@ -1188,7 +1188,7 @@ void testNistMGH10(void)
info = lm.minimize(x);
// check return value
- VERIFY( 2 == info);
+ VERIFY( 3 == info);
VERIFY( 126 == lm.nfev);
VERIFY( 116 == lm.njev);
// check norm^2
@@ -1243,8 +1243,8 @@ void testNistBoxBOD(void)
// do the computation
BoxBOD_functor functor;
LevenbergMarquardt<BoxBOD_functor> lm(functor);
- lm.parameters.ftol = 1.E6*epsilon<double>();
- lm.parameters.xtol = 1.E6*epsilon<double>();
+ lm.parameters.ftol = 1.E6*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E6*NumTraits<double>::epsilon();
lm.parameters.factor = 10.;
info = lm.minimize(x);
@@ -1264,14 +1264,14 @@ void testNistBoxBOD(void)
x<< 100., 0.75;
// do the computation
lm.resetParameters();
- lm.parameters.ftol = epsilon<double>();
- lm.parameters.xtol = epsilon<double>();
+ lm.parameters.ftol = NumTraits<double>::epsilon();
+ lm.parameters.xtol = NumTraits<double>::epsilon();
info = lm.minimize(x);
// check return value
- VERIFY( 1 == info);
- VERIFY( 15 == lm.nfev);
- VERIFY( 14 == lm.njev);
+ VERIFY( 1 == info);
+ VERIFY( 15 == lm.nfev);
+ VERIFY( 14 == lm.njev);
// check norm^2
VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.1680088766E+03);
// check x
@@ -1325,15 +1325,15 @@ void testNistMGH17(void)
// do the computation
MGH17_functor functor;
LevenbergMarquardt<MGH17_functor> lm(functor);
- lm.parameters.ftol = epsilon<double>();
- lm.parameters.xtol = epsilon<double>();
+ lm.parameters.ftol = NumTraits<double>::epsilon();
+ lm.parameters.xtol = NumTraits<double>::epsilon();
lm.parameters.maxfev = 1000;
info = lm.minimize(x);
// check return value
- VERIFY( 1 == info);
- VERIFY( 599 == lm.nfev);
- VERIFY( 544 == lm.njev);
+ VERIFY( 2 == info);
+ VERIFY( 602 == lm.nfev);
+ VERIFY( 545 == lm.njev);
// check norm^2
VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.4648946975E-05);
// check x
@@ -1418,16 +1418,16 @@ void testNistMGH09(void)
info = lm.minimize(x);
// check return value
- VERIFY( 1 == info);
- VERIFY( 503== lm.nfev);
- VERIFY( 385 == lm.njev);
+ VERIFY( 1 == info);
+ VERIFY( 490 == lm.nfev);
+ VERIFY( 376 == lm.njev);
// check norm^2
VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 3.0750560385E-04);
// check x
- VERIFY_IS_APPROX(x[0], 0.19280624); // should be 1.9280693458E-01
- VERIFY_IS_APPROX(x[1], 0.19129774); // should be 1.9128232873E-01
- VERIFY_IS_APPROX(x[2], 0.12305940); // should be 1.2305650693E-01
- VERIFY_IS_APPROX(x[3], 0.13606946); // should be 1.3606233068E-01
+ VERIFY_IS_APPROX(x[0], 0.1928077089); // should be 1.9280693458E-01
+ VERIFY_IS_APPROX(x[1], 0.19126423573); // should be 1.9128232873E-01
+ VERIFY_IS_APPROX(x[2], 0.12305309914); // should be 1.2305650693E-01
+ VERIFY_IS_APPROX(x[3], 0.13605395375); // should be 1.3606233068E-01
/*
* Second try
@@ -1584,8 +1584,8 @@ void testNistThurber(void)
// do the computation
thurber_functor functor;
LevenbergMarquardt<thurber_functor> lm(functor);
- lm.parameters.ftol = 1.E4*epsilon<double>();
- lm.parameters.xtol = 1.E4*epsilon<double>();
+ lm.parameters.ftol = 1.E4*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E4*NumTraits<double>::epsilon();
info = lm.minimize(x);
// check return value
@@ -1609,8 +1609,8 @@ void testNistThurber(void)
x<< 1300 ,1500 ,500 ,75 ,1 ,0.4 ,0.05 ;
// do the computation
lm.resetParameters();
- lm.parameters.ftol = 1.E4*epsilon<double>();
- lm.parameters.xtol = 1.E4*epsilon<double>();
+ lm.parameters.ftol = 1.E4*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E4*NumTraits<double>::epsilon();
info = lm.minimize(x);
// check return value
@@ -1676,8 +1676,8 @@ void testNistRat43(void)
// do the computation
rat43_functor functor;
LevenbergMarquardt<rat43_functor> lm(functor);
- lm.parameters.ftol = 1.E6*epsilon<double>();
- lm.parameters.xtol = 1.E6*epsilon<double>();
+ lm.parameters.ftol = 1.E6*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E6*NumTraits<double>::epsilon();
info = lm.minimize(x);
// check return value
@@ -1698,8 +1698,8 @@ void testNistRat43(void)
x<< 700., 5., 0.75, 1.3;
// do the computation
lm.resetParameters();
- lm.parameters.ftol = 1.E5*epsilon<double>();
- lm.parameters.xtol = 1.E5*epsilon<double>();
+ lm.parameters.ftol = 1.E5*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E5*NumTraits<double>::epsilon();
info = lm.minimize(x);
// check return value
@@ -1833,7 +1833,6 @@ void test_NonLinearOptimization()
/*
* Can be useful for debugging...
- printf("info, nfev, njev : %d, %d, %d\n", info, lm.nfev, lm.njev);
printf("info, nfev : %d, %d\n", info, lm.nfev);
printf("info, nfev, njev : %d, %d, %d\n", info, solver.nfev, solver.njev);
printf("info, nfev : %d, %d\n", info, solver.nfev);
@@ -1843,5 +1842,14 @@ void test_NonLinearOptimization()
printf("x[3] : %.32g\n", x[3]);
printf("fvec.blueNorm() : %.32g\n", solver.fvec.blueNorm());
printf("fvec.blueNorm() : %.32g\n", lm.fvec.blueNorm());
+
+ printf("info, nfev, njev : %d, %d, %d\n", info, lm.nfev, lm.njev);
+ printf("fvec.squaredNorm() : %.13g\n", lm.fvec.squaredNorm());
+ std::cout << x << std::endl;
+ std::cout.precision(9);
+ std::cout << x[0] << std::endl;
+ std::cout << x[1] << std::endl;
+ std::cout << x[2] << std::endl;
+ std::cout << x[3] << std::endl;
*/
diff --git a/unsupported/test/matrix_exponential.cpp b/unsupported/test/matrix_exponential.cpp
index a5b40adde..6150439c5 100644
--- a/unsupported/test/matrix_exponential.cpp
+++ b/unsupported/test/matrix_exponential.cpp
@@ -61,7 +61,7 @@ void test2dRotation(double tol)
std::cout << "test2dRotation: i = " << i << " error funm = " << relerr(C, B);
VERIFY(C.isApprox(B, static_cast<T>(tol)));
- ei_matrix_exponential(angle*A, &C);
+ C = ei_matrix_exponential(angle*A);
std::cout << " error expm = " << relerr(C, B) << "\n";
VERIFY(C.isApprox(B, static_cast<T>(tol)));
}
@@ -86,7 +86,7 @@ void test2dHyperbolicRotation(double tol)
std::cout << "test2dHyperbolicRotation: i = " << i << " error funm = " << relerr(C, B);
VERIFY(C.isApprox(B, static_cast<T>(tol)));
- ei_matrix_exponential(A, &C);
+ C = ei_matrix_exponential(A);
std::cout << " error expm = " << relerr(C, B) << "\n";
VERIFY(C.isApprox(B, static_cast<T>(tol)));
}
@@ -110,7 +110,7 @@ void testPascal(double tol)
std::cout << "testPascal: size = " << size << " error funm = " << relerr(C, B);
VERIFY(C.isApprox(B, static_cast<T>(tol)));
- ei_matrix_exponential(A, &C);
+ C = ei_matrix_exponential(A);
std::cout << " error expm = " << relerr(C, B) << "\n";
VERIFY(C.isApprox(B, static_cast<T>(tol)));
}
@@ -137,10 +137,9 @@ void randomTest(const MatrixType& m, double tol)
std::cout << "randomTest: error funm = " << relerr(identity, m2 * m3);
VERIFY(identity.isApprox(m2 * m3, static_cast<RealScalar>(tol)));
- ei_matrix_exponential(m1, &m2);
- ei_matrix_exponential(-m1, &m3);
- std::cout << " error expm = " << relerr(identity, m2 * m3) << "\n";
- VERIFY(identity.isApprox(m2 * m3, static_cast<RealScalar>(tol)));
+ m2 = ei_matrix_exponential(m1) * ei_matrix_exponential(-m1);
+ std::cout << " error expm = " << relerr(identity, m2) << "\n";
+ VERIFY(identity.isApprox(m2, static_cast<RealScalar>(tol)));
}
}
diff --git a/unsupported/test/matrix_function.cpp b/unsupported/test/matrix_function.cpp
index 0eb30eecb..4ff6d7f1e 100644
--- a/unsupported/test/matrix_function.cpp
+++ b/unsupported/test/matrix_function.cpp
@@ -25,44 +25,100 @@
#include "main.h"
#include <unsupported/Eigen/MatrixFunctions>
+// Returns a matrix with eigenvalues clustered around 0, 1 and 2.
template<typename MatrixType>
-void testMatrixExponential(const MatrixType& m)
+MatrixType randomMatrixWithRealEivals(const int size)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ MatrixType diag = MatrixType::Zero(size, size);
+ for (int i = 0; i < size; ++i) {
+ diag(i, i) = Scalar(RealScalar(ei_random<int>(0,2)))
+ + ei_random<Scalar>() * Scalar(RealScalar(0.01));
+ }
+ MatrixType A = MatrixType::Random(size, size);
+ return A.inverse() * diag * A;
+}
+
+template <typename MatrixType, int IsComplex = NumTraits<typename ei_traits<MatrixType>::Scalar>::IsComplex>
+struct randomMatrixWithImagEivals
+{
+ // Returns a matrix with eigenvalues clustered around 0 and +/- i.
+ static MatrixType run(const int size);
+};
+
+// Partial specialization for real matrices
+template<typename MatrixType>
+struct randomMatrixWithImagEivals<MatrixType, 0>
+{
+ static MatrixType run(const int size)
+ {
+ typedef typename MatrixType::Scalar Scalar;
+ MatrixType diag = MatrixType::Zero(size, size);
+ int i = 0;
+ while (i < size) {
+ int randomInt = ei_random<int>(-1, 1);
+ if (randomInt == 0 || i == size-1) {
+ diag(i, i) = ei_random<Scalar>() * Scalar(0.01);
+ ++i;
+ } else {
+ Scalar alpha = Scalar(randomInt) + ei_random<Scalar>() * Scalar(0.01);
+ diag(i, i+1) = alpha;
+ diag(i+1, i) = -alpha;
+ i += 2;
+ }
+ }
+ MatrixType A = MatrixType::Random(size, size);
+ return A.inverse() * diag * A;
+ }
+};
+
+// Partial specialization for complex matrices
+template<typename MatrixType>
+struct randomMatrixWithImagEivals<MatrixType, 1>
+{
+ static MatrixType run(const int size)
+ {
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ const Scalar imagUnit(0, 1);
+ MatrixType diag = MatrixType::Zero(size, size);
+ for (int i = 0; i < size; ++i) {
+ diag(i, i) = Scalar(RealScalar(ei_random<int>(-1, 1))) * imagUnit
+ + ei_random<Scalar>() * Scalar(RealScalar(0.01));
+ }
+ MatrixType A = MatrixType::Random(size, size);
+ return A.inverse() * diag * A;
+ }
+};
+
+template<typename MatrixType>
+void testMatrixExponential(const MatrixType& A)
{
typedef typename ei_traits<MatrixType>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef std::complex<RealScalar> ComplexScalar;
- const int rows = m.rows();
- const int cols = m.cols();
-
for (int i = 0; i < g_repeat; i++) {
- MatrixType A = MatrixType::Random(rows, cols);
- MatrixType expA1, expA2;
- ei_matrix_exponential(A, &expA1);
- ei_matrix_function(A, StdStemFunctions<ComplexScalar>::exp, &expA2);
- VERIFY_IS_APPROX(expA1, expA2);
+ VERIFY_IS_APPROX(ei_matrix_exponential(A),
+ ei_matrix_function(A, StdStemFunctions<ComplexScalar>::exp));
}
}
template<typename MatrixType>
-void testHyperbolicFunctions(const MatrixType& m)
+void testHyperbolicFunctions(const MatrixType& A)
{
- const int rows = m.rows();
- const int cols = m.cols();
-
for (int i = 0; i < g_repeat; i++) {
- MatrixType A = MatrixType::Random(rows, cols);
- MatrixType sinhA, coshA, expA;
- ei_matrix_sinh(A, &sinhA);
- ei_matrix_cosh(A, &coshA);
- ei_matrix_exponential(A, &expA);
+ MatrixType sinhA = ei_matrix_sinh(A);
+ MatrixType coshA = ei_matrix_cosh(A);
+ MatrixType expA = ei_matrix_exponential(A);
VERIFY_IS_APPROX(sinhA, (expA - expA.inverse())/2);
VERIFY_IS_APPROX(coshA, (expA + expA.inverse())/2);
}
}
template<typename MatrixType>
-void testGonioFunctions(const MatrixType& m)
+void testGonioFunctions(const MatrixType& A)
{
typedef ei_traits<MatrixType> Traits;
typedef typename Traits::Scalar Scalar;
@@ -71,36 +127,44 @@ void testGonioFunctions(const MatrixType& m)
typedef Matrix<ComplexScalar, Traits::RowsAtCompileTime,
Traits::ColsAtCompileTime, MatrixType::Options> ComplexMatrix;
- const int rows = m.rows();
- const int cols = m.cols();
ComplexScalar imagUnit(0,1);
ComplexScalar two(2,0);
for (int i = 0; i < g_repeat; i++) {
- MatrixType A = MatrixType::Random(rows, cols);
ComplexMatrix Ac = A.template cast<ComplexScalar>();
- ComplexMatrix exp_iA;
- ei_matrix_exponential(imagUnit * Ac, &exp_iA);
+ ComplexMatrix exp_iA = ei_matrix_exponential(imagUnit * Ac);
- MatrixType sinA;
- ei_matrix_sin(A, &sinA);
+ MatrixType sinA = ei_matrix_sin(A);
ComplexMatrix sinAc = sinA.template cast<ComplexScalar>();
VERIFY_IS_APPROX(sinAc, (exp_iA - exp_iA.inverse()) / (two*imagUnit));
- MatrixType cosA;
- ei_matrix_cos(A, &cosA);
+ MatrixType cosA = ei_matrix_cos(A);
ComplexMatrix cosAc = cosA.template cast<ComplexScalar>();
VERIFY_IS_APPROX(cosAc, (exp_iA + exp_iA.inverse()) / 2);
}
}
template<typename MatrixType>
+void testMatrix(const MatrixType& A)
+{
+ testMatrixExponential(A);
+ testHyperbolicFunctions(A);
+ testGonioFunctions(A);
+}
+
+template<typename MatrixType>
void testMatrixType(const MatrixType& m)
{
- testMatrixExponential(m);
- testHyperbolicFunctions(m);
- testGonioFunctions(m);
+  // Matrices with clustered eigenvalues lead to different code paths
+ // in MatrixFunction.h and are thus useful for testing.
+
+ const int size = m.rows();
+ for (int i = 0; i < g_repeat; i++) {
+ testMatrix(MatrixType::Random(size, size).eval());
+ testMatrix(randomMatrixWithRealEivals<MatrixType>(size));
+ testMatrix(randomMatrixWithImagEivals<MatrixType>::run(size));
+ }
}
void test_matrix_function()