Diffstat (limited to 'doc')
-rw-r--r--  doc/A05_PortingFrom2To3.dox | 2
-rw-r--r--  doc/CMakeLists.txt | 4
-rw-r--r--  doc/CoeffwiseMathFunctionsTable.dox | 525
-rw-r--r--  doc/CustomizingEigen.dox | 226
-rw-r--r--  doc/CustomizingEigen_CustomScalar.dox | 120
-rw-r--r--  doc/CustomizingEigen_InheritingMatrix.dox | 34
-rw-r--r--  doc/CustomizingEigen_NullaryExpr.dox | 86
-rw-r--r--  doc/CustomizingEigen_Plugins.dox | 69
-rw-r--r--  doc/DenseDecompositionBenchmark.dox | 42
-rw-r--r--  doc/Doxyfile.in | 21
-rw-r--r--  doc/InplaceDecomposition.dox | 115
-rw-r--r--  doc/Manual.dox | 23
-rw-r--r--  doc/MatrixfreeSolverExample.dox | 8
-rw-r--r--  doc/NewExpressionType.dox | 8
-rw-r--r--  doc/Overview.dox | 4
-rw-r--r--  doc/PreprocessorDirectives.dox | 33
-rw-r--r--  doc/SparseLinearSystems.dox | 3
-rw-r--r--  doc/SparseQuickReference.dox | 16
-rw-r--r--  doc/TopicAssertions.dox | 2
-rw-r--r--  doc/TopicLinearAlgebraDecompositions.dox | 2
-rw-r--r--  doc/UnalignedArrayAssert.dox | 21
-rw-r--r--  doc/UsingBlasLapackBackends.dox | 133
-rw-r--r--  doc/UsingIntelMKL.dox | 94
-rw-r--r--  doc/eigendoxy.css | 31
-rw-r--r--  doc/examples/CMakeLists.txt | 5
-rw-r--r--  doc/examples/Cwise_erf.cpp | 9
-rw-r--r--  doc/examples/Cwise_erfc.cpp | 9
-rw-r--r--  doc/examples/Cwise_lgamma.cpp | 9
-rw-r--r--  doc/examples/TutorialInplaceLU.cpp | 61
-rw-r--r--  doc/examples/make_circulant2.cpp | 52
-rw-r--r--  doc/examples/nullary_indexing.cpp | 66
-rw-r--r--  doc/ftv2node.png | bin 0 -> 86 bytes
-rw-r--r--  doc/ftv2pnode.png | bin 0 -> 229 bytes
-rw-r--r--  doc/snippets/CMakeLists.txt | 2
-rw-r--r--  doc/snippets/Cwise_erf.cpp | 2
-rw-r--r--  doc/snippets/Cwise_erfc.cpp | 2
-rw-r--r--  doc/snippets/Cwise_lgamma.cpp | 2
-rw-r--r--  doc/snippets/SparseMatrix_coeffs.cpp | 9
-rw-r--r--  doc/snippets/compile_snippet.cpp.in | 5
-rw-r--r--  doc/special_examples/random_cpp11.cpp | 2
40 files changed, 1509 insertions, 348 deletions
diff --git a/doc/A05_PortingFrom2To3.dox b/doc/A05_PortingFrom2To3.dox
index 0dbddb976..51555f996 100644
--- a/doc/A05_PortingFrom2To3.dox
+++ b/doc/A05_PortingFrom2To3.dox
@@ -261,7 +261,7 @@ use it unless you are sure of what you are doing, i.e., you have rigourosly meas
The EIGEN_ALIGN_128 macro has been renamed to EIGEN_ALIGN16. Don't be surprised, it's just that we switched to counting in bytes ;-)
-The EIGEN_DONT_ALIGN option still exists in Eigen 3, but it has a new cousin: EIGEN_DONT_ALIGN_STATICALLY. It allows to get rid of all static alignment issues while keeping alignment of dynamic-size heap-allocated arrays, thus keeping vectorization for dynamic-size objects.
+The \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_ALIGN \endlink option still exists in Eigen 3, but it has a new cousin: \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_ALIGN_STATICALLY.\endlink It lets you get rid of all static alignment issues while keeping alignment of dynamic-size heap-allocated arrays. Vectorization of statically allocated arrays is still preserved (unless you define \link TopicPreprocessorDirectivesPerformance EIGEN_UNALIGNED_VECTORIZE \endlink =0), at the cost of unaligned memory stores.
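For illustration, a minimal sketch of how such an option is typically enabled (the macro must be defined before any %Eigen header is included; the file layout here is hypothetical):
\code
// main.cpp -- hypothetical translation unit
#define EIGEN_DONT_ALIGN_STATICALLY   // must appear before any Eigen include
#include <Eigen/Dense>

struct Pose {
  Eigen::Matrix4f transform;   // fixed-size member, no special alignment required anymore
};

int main()
{
  Pose* p = new Pose;          // safe even without an aligned operator new
  p->transform.setIdentity();
  delete p;
}
\endcode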
\section AlignedMap Aligned Map objects
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index 4d01a0424..db413bc65 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -78,6 +78,8 @@ add_custom_target(
COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/html/
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/eigen_navtree_hacks.js ${CMAKE_CURRENT_BINARY_DIR}/html/
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/Eigen_Silly_Professor_64x64.png ${CMAKE_CURRENT_BINARY_DIR}/html/
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/ftv2pnode.png ${CMAKE_CURRENT_BINARY_DIR}/html/
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/ftv2node.png ${CMAKE_CURRENT_BINARY_DIR}/html/
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/AsciiQuickReference.txt ${CMAKE_CURRENT_BINARY_DIR}/html/
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
@@ -88,6 +90,8 @@ add_custom_target(
COMMAND ${CMAKE_COMMAND} -E make_directory ${Eigen_BINARY_DIR}/doc/html/unsupported
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/eigen_navtree_hacks.js ${CMAKE_CURRENT_BINARY_DIR}/html/unsupported/
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/Eigen_Silly_Professor_64x64.png ${CMAKE_CURRENT_BINARY_DIR}/html/unsupported/
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/ftv2pnode.png ${CMAKE_CURRENT_BINARY_DIR}/html/unsupported/
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/ftv2node.png ${CMAKE_CURRENT_BINARY_DIR}/html/unsupported/
WORKING_DIRECTORY ${Eigen_BINARY_DIR}/doc
)
diff --git a/doc/CoeffwiseMathFunctionsTable.dox b/doc/CoeffwiseMathFunctionsTable.dox
new file mode 100644
index 000000000..ac6e0bd31
--- /dev/null
+++ b/doc/CoeffwiseMathFunctionsTable.dox
@@ -0,0 +1,525 @@
+namespace Eigen {
+
+/** \eigenManualPage CoeffwiseMathFunctions Catalog of coefficient-wise math functions
+
+
+<!-- <span style="font-size:300%; color:red; font-weight: 900;">!WORK IN PROGRESS!</span> -->
+
+This table presents a catalog of the coefficient-wise math functions supported by %Eigen.
+In this table, \c a and \c b refer to Array objects or expressions, and \c m refers to a linear algebra Matrix/Vector object. Standard scalar types are abbreviated as follows:
+ - \c int: \c i32
+ - \c float: \c f
+ - \c double: \c d
+ - \c std::complex<float>: \c cf
+ - \c std::complex<double>: \c cd
+
+For each row, the first column lists the equivalent calls for arrays, and for matrices when supported. Of course, all functions remain available for matrices by first casting the matrix as an array: \c m.array().
+
+The third column gives some hints on the underlying scalar implementation. In most cases, %Eigen does not implement the math function itself but relies on the STL for standard scalar types, or on user-provided functions for custom scalar types.
+For instance, some functions simply call the respective STL function while preserving <a href="http://en.cppreference.com/w/cpp/language/adl">argument-dependent lookup</a> for custom types.
+The following:
+\code
+using std::foo;
+foo(a[i]);
+\endcode
+means that the function \c std::foo will potentially be called if it is compatible with the underlying scalar type. If not, then the user must ensure that an overload of the function \c foo is available for the given scalar type (usually defined in the same namespace as that scalar type).
+This also means that, unless specified otherwise, if the function \c std::foo is available only in some recent c++ versions (e.g., c++11), then the respective %Eigen function/method will be usable with standard types only if the compiler supports the required c++ version.
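As a small, illustrative example of these conventions (variable names and sizes are arbitrary), the same coefficient-wise function can be reached through the array API, the equivalent free function, or \c m.array() for matrices:
\code
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::ArrayXd  a = Eigen::ArrayXd::LinSpaced(4, 0.0, 3.0);
  Eigen::MatrixXd m = Eigen::MatrixXd::Constant(2, 2, 4.0);

  std::cout << a.exp()          << "\n\n";  // member call on an Array
  std::cout << Eigen::exp(a)    << "\n\n";  // equivalent free function
  std::cout << m.array().sqrt() << "\n\n";  // matrices: cast to array first
  std::cout << m.cwiseSqrt()    << "\n";    // or use the cwise* matrix API
}
\endcode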
+
+<table class="manual-hl">
+<tr>
+<th>API</th><th>Description</th><th>Default scalar implementation</th><th>SIMD</th>
+</tr>
+<tr><td colspan="4"></td></tr>
+<tr><th colspan="4">Basic operations</th></tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_abs
+ a.\link ArrayBase::abs abs\endlink(); \n
+ \link Eigen::abs abs\endlink(a); \n
+ m.\link MatrixBase::cwiseAbs cwiseAbs\endlink();
+ </td>
+ <td>absolute value (\f$ |a_i| \f$) </td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/fabs">std::abs</a>; \n
+ abs(a[i]);
+ </td>
+ <td>SSE2, AVX (i32,f,d)</td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_inverse
+ a.\link ArrayBase::inverse inverse\endlink(); \n
+ \link Eigen::inverse inverse\endlink(a); \n
+ m.\link MatrixBase::cwiseInverse cwiseInverse\endlink();
+ </td>
+ <td>inverse value (\f$ 1/a_i \f$) </td>
+ <td class="code">
+ 1/a[i];
+ </td>
+  <td>All engines (f,d,cf,cd)</td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_conj
+ a.\link ArrayBase::conjugate conjugate\endlink(); \n
+ \link Eigen::conj conj\endlink(a); \n
+    m.\link MatrixBase::conjugate conjugate\endlink();
+ </td>
+ <td><a href="https://en.wikipedia.org/wiki/Complex_conjugate">complex conjugate</a> (\f$ \bar{a_i} \f$),\n
+ no-op for real </td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/complex/conj">std::conj</a>; \n
+ conj(a[i]);
+ </td>
+  <td>All engines (cf,cd)</td>
+</tr>
+<tr>
+<th colspan="4">Exponential functions</th>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_exp
+ a.\link ArrayBase::exp exp\endlink(); \n
+ \link Eigen::exp exp\endlink(a);
+ </td>
+ <td>\f$ e \f$ raised to the given power (\f$ e^{a_i} \f$) </td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/exp">std::exp</a>; \n
+ exp(a[i]);
+ </td>
+ <td>SSE2, AVX (f,d)</td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_log
+ a.\link ArrayBase::log log\endlink(); \n
+ \link Eigen::log log\endlink(a);
+ </td>
+ <td>natural (base \f$ e \f$) logarithm (\f$ \ln({a_i}) \f$)</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/log">std::log</a>; \n
+ log(a[i]);
+ </td>
+ <td>SSE2, AVX (f)</td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_log1p
+ a.\link ArrayBase::log1p log1p\endlink(); \n
+ \link Eigen::log1p log1p\endlink(a);
+ </td>
+ <td>natural (base \f$ e \f$) logarithm of 1 plus \n the given number (\f$ \ln({1+a_i}) \f$)</td>
+ <td>built-in generic implementation based on \c log,\n
+ plus \c using <a href="http://en.cppreference.com/w/cpp/numeric/math/log1p">\c std::log1p </a>; \cpp11</td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_log10
+ a.\link ArrayBase::log10 log10\endlink(); \n
+ \link Eigen::log10 log10\endlink(a);
+ </td>
+ <td>base 10 logarithm (\f$ \log_{10}({a_i}) \f$)</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/log10">std::log10</a>; \n
+ log10(a[i]);
+ </td>
+ <td></td>
+</tr>
+<tr>
+<th colspan="4">Power functions</th>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_pow
+ a.\link ArrayBase::pow pow\endlink(b); \n
+ \link Eigen::pow pow\endlink(a,b);
+ </td>
+ <td>raises a number to the given power (\f$ a_i ^ {b_i} \f$) \n \c a and \c b can be either an array or scalar.</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/pow">std::pow</a>; \n
+ pow(a[i],b[i]);\n
+ (plus builtin for integer types)</td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_sqrt
+ a.\link ArrayBase::sqrt sqrt\endlink(); \n
+ \link Eigen::sqrt sqrt\endlink(a);\n
+ m.\link MatrixBase::cwiseSqrt cwiseSqrt\endlink();
+ </td>
+ <td>computes square root (\f$ \sqrt a_i \f$)</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/sqrt">std::sqrt</a>; \n
+ sqrt(a[i]);</td>
+ <td>SSE2, AVX (f,d)</td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_rsqrt
+ a.\link ArrayBase::rsqrt rsqrt\endlink(); \n
+ \link Eigen::rsqrt rsqrt\endlink(a);
+ </td>
+ <td><a href="https://en.wikipedia.org/wiki/Fast_inverse_square_root">reciprocal square root</a> (\f$ 1/{\sqrt a_i} \f$)</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/sqrt">std::sqrt</a>; \n
+ 1/sqrt(a[i]); \n
+ </td>
+ <td>SSE2, AVX, AltiVec, ZVector (f,d)\n
+ (approx + 1 Newton iteration)</td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_square
+ a.\link ArrayBase::square square\endlink(); \n
+ \link Eigen::square square\endlink(a);
+ </td>
+ <td>computes square power (\f$ a_i^2 \f$)</td>
+ <td class="code">
+ a[i]*a[i]</td>
+ <td>All (i32,f,d,cf,cd)</td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_cube
+ a.\link ArrayBase::cube cube\endlink(); \n
+ \link Eigen::cube cube\endlink(a);
+ </td>
+ <td>computes cubic power (\f$ a_i^3 \f$)</td>
+ <td class="code">
+ a[i]*a[i]*a[i]</td>
+ <td>All (i32,f,d,cf,cd)</td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_abs2
+ a.\link ArrayBase::abs2 abs2\endlink(); \n
+ \link Eigen::abs2 abs2\endlink(a);\n
+ m.\link MatrixBase::cwiseAbs2 cwiseAbs2\endlink();
+ </td>
+ <td>computes the squared absolute value (\f$ |a_i|^2 \f$)</td>
+ <td class="code">
+ real: a[i]*a[i] \n
+ complex: real(a[i])*real(a[i]) \n
+ &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; + imag(a[i])*imag(a[i])</td>
+ <td>All (i32,f,d)</td>
+</tr>
+<tr>
+<th colspan="4">Trigonometric functions</th>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_sin
+ a.\link ArrayBase::sin sin\endlink(); \n
+ \link Eigen::sin sin\endlink(a);
+ </td>
+ <td>computes sine</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/sin">std::sin</a>; \n
+ sin(a[i]);</td>
+ <td>SSE2, AVX (f)</td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_cos
+ a.\link ArrayBase::cos cos\endlink(); \n
+ \link Eigen::cos cos\endlink(a);
+ </td>
+ <td>computes cosine</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/cos">std::cos</a>; \n
+ cos(a[i]);</td>
+ <td>SSE2, AVX (f)</td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_tan
+ a.\link ArrayBase::tan tan\endlink(); \n
+ \link Eigen::tan tan\endlink(a);
+ </td>
+ <td>computes tangent</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/tan">std::tan</a>; \n
+ tan(a[i]);</td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_asin
+ a.\link ArrayBase::asin asin\endlink(); \n
+ \link Eigen::asin asin\endlink(a);
+ </td>
+ <td>computes arc sine (\f$ \sin^{-1} a_i \f$)</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/asin">std::asin</a>; \n
+ asin(a[i]);</td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_acos
+ a.\link ArrayBase::acos acos\endlink(); \n
+ \link Eigen::acos acos\endlink(a);
+ </td>
+ <td>computes arc cosine (\f$ \cos^{-1} a_i \f$)</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/acos">std::acos</a>; \n
+ acos(a[i]);</td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_atan
+    a.\link ArrayBase::atan atan\endlink(); \n
+ \link Eigen::atan atan\endlink(a);
+ </td>
+ <td>computes arc tangent (\f$ \tan^{-1} a_i \f$)</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/atan">std::atan</a>; \n
+ atan(a[i]);</td>
+ <td></td>
+</tr>
+<tr>
+<th colspan="4">Hyperbolic functions</th>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_sinh
+ a.\link ArrayBase::sinh sinh\endlink(); \n
+ \link Eigen::sinh sinh\endlink(a);
+ </td>
+ <td>computes hyperbolic sine</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/sinh">std::sinh</a>; \n
+ sinh(a[i]);</td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_cosh
+    a.\link ArrayBase::cosh cosh\endlink(); \n
+ \link Eigen::cosh cosh\endlink(a);
+ </td>
+ <td>computes hyperbolic cosine</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/cosh">std::cosh</a>; \n
+ cosh(a[i]);</td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_tanh
+ a.\link ArrayBase::tanh tanh\endlink(); \n
+ \link Eigen::tanh tanh\endlink(a);
+ </td>
+ <td>computes hyperbolic tangent</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/tanh">std::tanh</a>; \n
+ tanh(a[i]);</td>
+ <td></td>
+</tr>
+<tr>
+<th colspan="4">Nearest integer floating point operations</th>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_ceil
+ a.\link ArrayBase::ceil ceil\endlink(); \n
+ \link Eigen::ceil ceil\endlink(a);
+ </td>
+ <td>nearest integer not less than the given value</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/ceil">std::ceil</a>; \n
+ ceil(a[i]);</td>
+ <td>SSE4,AVX,ZVector (f,d)</td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_floor
+ a.\link ArrayBase::floor floor\endlink(); \n
+ \link Eigen::floor floor\endlink(a);
+ </td>
+ <td>nearest integer not greater than the given value</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/floor">std::floor</a>; \n
+ floor(a[i]);</td>
+ <td>SSE4,AVX,ZVector (f,d)</td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_round
+ a.\link ArrayBase::round round\endlink(); \n
+ \link Eigen::round round\endlink(a);
+ </td>
+ <td>nearest integer, \n rounding away from zero in halfway cases</td>
+ <td>built-in generic implementation \n based on \c floor and \c ceil,\n
+ plus \c using <a href="http://en.cppreference.com/w/cpp/numeric/math/round">\c std::round </a>; \cpp11</td>
+ <td>SSE4,AVX,ZVector (f,d)</td>
+</tr>
+<tr>
+<th colspan="4">Floating point manipulation functions</th>
+</tr>
+<tr>
+<th colspan="4">Classification and comparison</th>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_isfinite
+ a.\link ArrayBase::isfinite isfinite\endlink(); \n
+ \link Eigen::isfinite isfinite\endlink(a);
+ </td>
+ <td>checks if the given number has finite value</td>
+ <td>built-in generic implementation,\n
+ plus \c using <a href="http://en.cppreference.com/w/cpp/numeric/math/isfinite">\c std::isfinite </a>; \cpp11</td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_isinf
+ a.\link ArrayBase::isinf isinf\endlink(); \n
+ \link Eigen::isinf isinf\endlink(a);
+ </td>
+ <td>checks if the given number is infinite</td>
+ <td>built-in generic implementation,\n
+ plus \c using <a href="http://en.cppreference.com/w/cpp/numeric/math/isinf">\c std::isinf </a>; \cpp11</td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_isnan
+ a.\link ArrayBase::isnan isnan\endlink(); \n
+ \link Eigen::isnan isnan\endlink(a);
+ </td>
+ <td>checks if the given number is not a number</td>
+ <td>built-in generic implementation,\n
+ plus \c using <a href="http://en.cppreference.com/w/cpp/numeric/math/isnan">\c std::isnan </a>; \cpp11</td>
+ <td></td>
+</tr>
+<tr>
+<th colspan="4">Error and gamma functions</th>
+</tr>
+<tr> <td colspan="4"> Requires \c #include \c <unsupported/Eigen/SpecialFunctions> </td></tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_erf
+ a.\link ArrayBase::erf erf\endlink(); \n
+ \link Eigen::erf erf\endlink(a);
+ </td>
+ <td>error function</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/erf">std::erf</a>; \cpp11 \n
+ erf(a[i]);
+ </td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_erfc
+ a.\link ArrayBase::erfc erfc\endlink(); \n
+ \link Eigen::erfc erfc\endlink(a);
+ </td>
+ <td>complementary error function</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/erfc">std::erfc</a>; \cpp11 \n
+ erfc(a[i]);
+ </td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_lgamma
+ a.\link ArrayBase::lgamma lgamma\endlink(); \n
+ \link Eigen::lgamma lgamma\endlink(a);
+ </td>
+ <td>natural logarithm of the gamma function</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/lgamma">std::lgamma</a>; \cpp11 \n
+ lgamma(a[i]);
+ </td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_digamma
+ a.\link ArrayBase::digamma digamma\endlink(); \n
+ \link Eigen::digamma digamma\endlink(a);
+ </td>
+ <td><a href="https://en.wikipedia.org/wiki/Digamma_function">logarithmic derivative of the gamma function</a></td>
+ <td>
+ built-in for float and double
+ </td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_igamma
+ \link Eigen::igamma igamma\endlink(a,x);
+ </td>
+ <td><a href="https://en.wikipedia.org/wiki/Incomplete_gamma_function">lower incomplete gamma integral</a>
+  \n \f$ \gamma(a_i,x_i)= \frac{1}{\Gamma(a_i)} \int_{0}^{x_i}e^{\text{-}t} t^{a_i-1} \mathrm{d} t \f$</td>
+ <td>
+ built-in for float and double,\n but requires \cpp11
+ </td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_igammac
+ \link Eigen::igammac igammac\endlink(a,x);
+ </td>
+ <td><a href="https://en.wikipedia.org/wiki/Incomplete_gamma_function">upper incomplete gamma integral</a>
+  \n \f$ \Gamma(a_i,x_i) = \frac{1}{\Gamma(a_i)} \int_{x_i}^{\infty}e^{\text{-}t} t^{a_i-1} \mathrm{d} t \f$</td>
+ <td>
+ built-in for float and double,\n but requires \cpp11
+ </td>
+ <td></td>
+</tr>
+<tr>
+<th colspan="4">Special functions</th>
+</tr>
+<tr> <td colspan="4"> Requires \c #include \c <unsupported/Eigen/SpecialFunctions> </td></tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_polygamma
+ \link Eigen::polygamma polygamma\endlink(n,x);
+ </td>
+ <td><a href="https://en.wikipedia.org/wiki/Polygamma_function">n-th derivative of digamma at x</a></td>
+ <td>
+ built-in generic based on\n <a href="#cwisetable_lgamma">\c lgamma </a>,
+ <a href="#cwisetable_digamma"> \c digamma </a>
+ and <a href="#cwisetable_zeta">\c zeta </a>.
+ </td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_betainc
+ \link Eigen::betainc betainc\endlink(a,b,x);
+ </td>
+ <td><a href="https://en.wikipedia.org/wiki/Beta_function#Incomplete_beta_function">Incomplete beta function</a></td>
+ <td>
+ built-in for float and double,\n but requires \cpp11
+ </td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_zeta
+ \link Eigen::zeta zeta\endlink(a,b);
+ </td>
+ <td><a href="https://en.wikipedia.org/wiki/Hurwitz_zeta_function">Hurwitz zeta function</a>
+ \n \f$ \zeta(a_i,b_i)=\sum_{k=0}^{\infty}(b_i+k)^{\text{-}a_i} \f$</td>
+ <td>
+ built-in for float and double
+ </td>
+ <td></td>
+</tr>
+<tr><td colspan="4"></td></tr>
+</table>
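As an illustration of the rows above that require the \b unsupported SpecialFunctions header, here is a minimal sketch (values chosen arbitrarily):
\code
#include <unsupported/Eigen/SpecialFunctions>
#include <iostream>

int main()
{
  Eigen::ArrayXd x = Eigen::ArrayXd::LinSpaced(5, 0.5, 2.5);
  std::cout << x.erf()    << "\n\n";  // error function, coefficient-wise
  std::cout << x.lgamma() << "\n";    // log-gamma, coefficient-wise
}
\endcode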
+
+\n
+
+*/
+
+} \ No newline at end of file
diff --git a/doc/CustomizingEigen.dox b/doc/CustomizingEigen.dox
deleted file mode 100644
index cb25f4ec9..000000000
--- a/doc/CustomizingEigen.dox
+++ /dev/null
@@ -1,226 +0,0 @@
-namespace Eigen {
-
-/** \page TopicCustomizingEigen Customizing/Extending Eigen
-
-Eigen can be extended in several ways, for instance, by defining global methods, \ref ExtendingMatrixBase "by adding custom methods to MatrixBase", adding support to \ref CustomScalarType "custom types" etc.
-
-\eigenAutoToc
-
-\section ExtendingMatrixBase Extending MatrixBase (and other classes)
-
-In this section we will see how to add custom methods to MatrixBase. Since all expressions and matrix types inherit MatrixBase, adding a method to MatrixBase make it immediately available to all expressions ! A typical use case is, for instance, to make Eigen compatible with another API.
-
-You certainly know that in C++ it is not possible to add methods to an existing class. So how that's possible ? Here the trick is to include in the declaration of MatrixBase a file defined by the preprocessor token \c EIGEN_MATRIXBASE_PLUGIN:
-\code
-class MatrixBase {
- // ...
- #ifdef EIGEN_MATRIXBASE_PLUGIN
- #include EIGEN_MATRIXBASE_PLUGIN
- #endif
-};
-\endcode
-Therefore to extend MatrixBase with your own methods you just have to create a file with your method declaration and define EIGEN_MATRIXBASE_PLUGIN before you include any Eigen's header file.
-
-You can extend many of the other classes used in Eigen by defining similarly named preprocessor symbols. For instance, define \c EIGEN_ARRAYBASE_PLUGIN if you want to extend the ArrayBase class. A full list of classes that can be extended in this way and the corresponding preprocessor symbols can be found on our page \ref TopicPreprocessorDirectives.
-
-Here is an example of an extension file for adding methods to MatrixBase: \n
-\b MatrixBaseAddons.h
-\code
-inline Scalar at(uint i, uint j) const { return this->operator()(i,j); }
-inline Scalar& at(uint i, uint j) { return this->operator()(i,j); }
-inline Scalar at(uint i) const { return this->operator[](i); }
-inline Scalar& at(uint i) { return this->operator[](i); }
-
-inline RealScalar squaredLength() const { return squaredNorm(); }
-inline RealScalar length() const { return norm(); }
-inline RealScalar invLength(void) const { return fast_inv_sqrt(squaredNorm()); }
-
-template<typename OtherDerived>
-inline Scalar squaredDistanceTo(const MatrixBase<OtherDerived>& other) const
-{ return (derived() - other.derived()).squaredNorm(); }
-
-template<typename OtherDerived>
-inline RealScalar distanceTo(const MatrixBase<OtherDerived>& other) const
-{ return internal::sqrt(derived().squaredDistanceTo(other)); }
-
-inline void scaleTo(RealScalar l) { RealScalar vl = norm(); if (vl>1e-9) derived() *= (l/vl); }
-
-inline Transpose<Derived> transposed() {return this->transpose();}
-inline const Transpose<Derived> transposed() const {return this->transpose();}
-
-inline uint minComponentId(void) const { int i; this->minCoeff(&i); return i; }
-inline uint maxComponentId(void) const { int i; this->maxCoeff(&i); return i; }
-
-template<typename OtherDerived>
-void makeFloor(const MatrixBase<OtherDerived>& other) { derived() = derived().cwiseMin(other.derived()); }
-template<typename OtherDerived>
-void makeCeil(const MatrixBase<OtherDerived>& other) { derived() = derived().cwiseMax(other.derived()); }
-
-const CwiseUnaryOp<internal::scalar_add_op<Scalar>, Derived>
-operator+(const Scalar& scalar) const
-{ return CwiseUnaryOp<internal::scalar_add_op<Scalar>, Derived>(derived(), internal::scalar_add_op<Scalar>(scalar)); }
-
-friend const CwiseUnaryOp<internal::scalar_add_op<Scalar>, Derived>
-operator+(const Scalar& scalar, const MatrixBase<Derived>& mat)
-{ return CwiseUnaryOp<internal::scalar_add_op<Scalar>, Derived>(mat.derived(), internal::scalar_add_op<Scalar>(scalar)); }
-\endcode
-
-Then one can the following declaration in the config.h or whatever prerequisites header file of his project:
-\code
-#define EIGEN_MATRIXBASE_PLUGIN "MatrixBaseAddons.h"
-\endcode
-
-\section InheritingFromMatrix Inheriting from Matrix
-
-Before inheriting from Matrix, be really, I mean REALLY, sure that using
-EIGEN_MATRIX_PLUGIN is not what you really want (see previous section).
-If you just need to add few members to Matrix, this is the way to go.
-
-An example of when you actually need to inherit Matrix, is when you
-have several layers of heritage such as
-MyVerySpecificVector1, MyVerySpecificVector2 -> MyVector1 -> Matrix and
-MyVerySpecificVector3, MyVerySpecificVector4 -> MyVector2 -> Matrix.
-
-In order for your object to work within the %Eigen framework, you need to
-define a few members in your inherited class.
-
-Here is a minimalistic example:
-
-\include CustomizingEigen_Inheritance.cpp
-
-Output: \verbinclude CustomizingEigen_Inheritance.out
-
-This is the kind of error you can get if you don't provide those methods
-\verbatim
-error: no match for ‘operator=’ in ‘v = Eigen::operator*(
-const Eigen::MatrixBase<Eigen::Matrix<double, -0x000000001, 1, 0, -0x000000001, 1> >::Scalar&,
-const Eigen::MatrixBase<Eigen::Matrix<double, -0x000000001, 1> >::StorageBaseType&)
-(((const Eigen::MatrixBase<Eigen::Matrix<double, -0x000000001, 1> >::StorageBaseType&)
-((const Eigen::MatrixBase<Eigen::Matrix<double, -0x000000001, 1> >::StorageBaseType*)(& v))))’
-\endverbatim
-
-\anchor user_defined_scalars \section CustomScalarType Using custom scalar types
-
-By default, Eigen currently supports standard floating-point types (\c float, \c double, \c std::complex<float>, \c std::complex<double>, \c long \c double), as well as all native integer types (e.g., \c int, \c unsigned \c int, \c short, etc.), and \c bool.
-On x86-64 systems, \c long \c double permits to locally enforces the use of x87 registers with extended accuracy (in comparison to SSE).
-
-In order to add support for a custom type \c T you need:
--# make sure the common operator (+,-,*,/,etc.) are supported by the type \c T
--# add a specialization of struct Eigen::NumTraits<T> (see \ref NumTraits)
--# define the math functions that makes sense for your type. This includes standard ones like sqrt, pow, sin, tan, conj, real, imag, etc, as well as abs2 which is Eigen specific.
- (see the file Eigen/src/Core/MathFunctions.h)
-
-The math function should be defined in the same namespace than \c T, or in the \c std namespace though that second approach is not recommended.
-
-Here is a concrete example adding support for the Adolc's \c adouble type. <a href="https://projects.coin-or.org/ADOL-C">Adolc</a> is an automatic differentiation library. The type \c adouble is basically a real value tracking the values of any number of partial derivatives.
-
-\code
-#ifndef ADOLCSUPPORT_H
-#define ADOLCSUPPORT_H
-
-#define ADOLC_TAPELESS
-#include <adolc/adouble.h>
-#include <Eigen/Core>
-
-namespace Eigen {
-
-template<> struct NumTraits<adtl::adouble>
- : NumTraits<double> // permits to get the epsilon, dummy_precision, lowest, highest functions
-{
- typedef adtl::adouble Real;
- typedef adtl::adouble NonInteger;
- typedef adtl::adouble Nested;
-
- enum {
- IsComplex = 0,
- IsInteger = 0,
- IsSigned = 1,
- RequireInitialization = 1,
- ReadCost = 1,
- AddCost = 3,
- MulCost = 3
- };
-};
-
-}
-
-namespace adtl {
-
-inline const adouble& conj(const adouble& x) { return x; }
-inline const adouble& real(const adouble& x) { return x; }
-inline adouble imag(const adouble&) { return 0.; }
-inline adouble abs(const adouble& x) { return fabs(x); }
-inline adouble abs2(const adouble& x) { return x*x; }
-
-}
-
-#endif // ADOLCSUPPORT_H
-\endcode
-
-This other example adds support for the \c mpq_class type from <a href="https://gmplib.org/">GMP</a>. It shows in particular how to change the way Eigen picks the best pivot during LU factorization. It selects the coefficient with the highest score, where the score is by default the absolute value of a number, but we can define a different score, for instance to prefer pivots with a more compact representation (this is an example, not a recommendation). Note that the scores should always be non-negative and only zero is allowed to have a score of zero. Also, this can interact badly with thresholds for inexact scalar types.
-
-\code
-#include <gmpxx.h>
-#include <Eigen/Core>
-#include <boost/operators.hpp>
-
-namespace Eigen {
- template<class> struct NumTraits;
- template<> struct NumTraits<mpq_class>
- {
- typedef mpq_class Real;
- typedef mpq_class NonInteger;
- typedef mpq_class Nested;
-
- static inline Real epsilon() { return 0; }
- static inline Real dummy_precision() { return 0; }
-
- enum {
- IsInteger = 0,
- IsSigned = 1,
- IsComplex = 0,
- RequireInitialization = 1,
- ReadCost = 6,
- AddCost = 150,
- MulCost = 100
- };
- };
-
- namespace internal {
- template<>
- struct significant_decimals_impl<mpq_class>
- {
- // Infinite precision when printing
- static inline int run() { return 0; }
- };
-
- template<> struct scalar_score_coeff_op<mpq_class> {
- struct result_type : boost::totally_ordered1<result_type> {
- std::size_t len;
- result_type(int i = 0) : len(i) {} // Eigen uses Score(0) and Score()
- result_type(mpq_class const& q) :
- len(mpz_size(q.get_num_mpz_t())+
- mpz_size(q.get_den_mpz_t())-1) {}
- friend bool operator<(result_type x, result_type y) {
- // 0 is the worst possible pivot
- if (x.len == 0) return y.len > 0;
- if (y.len == 0) return false;
- // Prefer a pivot with a small representation
- return x.len > y.len;
- }
- friend bool operator==(result_type x, result_type y) {
- // Only used to test if the score is 0
- return x.len == y.len;
- }
- };
- result_type operator()(mpq_class const& x) const { return x; }
- };
- }
-}
-\endcode
-
-\sa \ref TopicPreprocessorDirectives
-
-*/
-
-}
diff --git a/doc/CustomizingEigen_CustomScalar.dox b/doc/CustomizingEigen_CustomScalar.dox
new file mode 100644
index 000000000..1ee78cbe5
--- /dev/null
+++ b/doc/CustomizingEigen_CustomScalar.dox
@@ -0,0 +1,120 @@
+namespace Eigen {
+
+/** \page TopicCustomizing_CustomScalar Using custom scalar types
+\anchor user_defined_scalars
+
+By default, Eigen currently supports standard floating-point types (\c float, \c double, \c std::complex<float>, \c std::complex<double>, \c long \c double), as well as all native integer types (e.g., \c int, \c unsigned \c int, \c short, etc.), and \c bool.
+On x86-64 systems, \c long \c double permits locally enforcing the use of x87 registers with extended accuracy (compared to SSE).
+
+In order to add support for a custom type \c T you need to:
+-# make sure the common operators (+,-,*,/,etc.) are supported by the type \c T
+-# add a specialization of struct Eigen::NumTraits<T> (see \ref NumTraits)
+-# define the math functions that make sense for your type. This includes standard ones like sqrt, pow, sin, tan, conj, real, imag, etc., as well as abs2, which is Eigen-specific.
+ (see the file Eigen/src/Core/MathFunctions.h)
+
+The math functions should be defined in the same namespace as \c T, or in the \c std namespace, though that second approach is not recommended.
+
+Here is a concrete example adding support for Adolc's \c adouble type. <a href="https://projects.coin-or.org/ADOL-C">Adolc</a> is an automatic differentiation library. The type \c adouble is basically a real value tracking the values of any number of partial derivatives.
+
+\code
+#ifndef ADOLCSUPPORT_H
+#define ADOLCSUPPORT_H
+
+#define ADOLC_TAPELESS
+#include <adolc/adouble.h>
+#include <Eigen/Core>
+
+namespace Eigen {
+
+template<> struct NumTraits<adtl::adouble>
+ : NumTraits<double> // permits to get the epsilon, dummy_precision, lowest, highest functions
+{
+ typedef adtl::adouble Real;
+ typedef adtl::adouble NonInteger;
+ typedef adtl::adouble Nested;
+
+ enum {
+ IsComplex = 0,
+ IsInteger = 0,
+ IsSigned = 1,
+ RequireInitialization = 1,
+ ReadCost = 1,
+ AddCost = 3,
+ MulCost = 3
+ };
+};
+
+}
+
+namespace adtl {
+
+inline const adouble& conj(const adouble& x) { return x; }
+inline const adouble& real(const adouble& x) { return x; }
+inline adouble imag(const adouble&) { return 0.; }
+inline adouble abs(const adouble& x) { return fabs(x); }
+inline adouble abs2(const adouble& x) { return x*x; }
+
+}
+
+#endif // ADOLCSUPPORT_H
+\endcode
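With such a support header in place, %Eigen types can be instantiated directly with the custom scalar. A hypothetical usage sketch follows (the header name and the \c getValue() accessor are assumptions about the ADOL-C tapeless interface):
\code
#include "AdolcSupport.h"   // the hypothetical support header sketched above
#include <iostream>

int main()
{
  typedef Eigen::Matrix<adtl::adouble, Eigen::Dynamic, 1> VectorXad;

  VectorXad x = VectorXad::Constant(3, adtl::adouble(2.0));
  adtl::adouble s = x.squaredNorm();   // Eigen algorithms now run on adouble
  std::cout << s.getValue() << "\n";   // assumed accessor for the plain value
}
\endcode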
+
+This other example adds support for the \c mpq_class type from <a href="https://gmplib.org/">GMP</a>. In particular, it shows how to change the way Eigen picks the best pivot during LU factorization: Eigen selects the coefficient with the highest score, where the score is by default the absolute value of a number, but we can define a different score, for instance to prefer pivots with a more compact representation (this is an example, not a recommendation). Note that the scores should always be non-negative and only zero is allowed to have a score of zero. Also, this can interact badly with thresholds for inexact scalar types.
+
+\code
+#include <gmpxx.h>
+#include <Eigen/Core>
+#include <boost/operators.hpp>
+
+namespace Eigen {
+ template<> struct NumTraits<mpq_class> : GenericNumTraits<mpq_class>
+ {
+ typedef mpq_class Real;
+ typedef mpq_class NonInteger;
+ typedef mpq_class Nested;
+
+ static inline Real epsilon() { return 0; }
+ static inline Real dummy_precision() { return 0; }
+ static inline Real digits10() { return 0; }
+
+ enum {
+ IsInteger = 0,
+ IsSigned = 1,
+ IsComplex = 0,
+ RequireInitialization = 1,
+ ReadCost = 6,
+ AddCost = 150,
+ MulCost = 100
+ };
+ };
+
+ namespace internal {
+
+ template<> struct scalar_score_coeff_op<mpq_class> {
+ struct result_type : boost::totally_ordered1<result_type> {
+ std::size_t len;
+ result_type(int i = 0) : len(i) {} // Eigen uses Score(0) and Score()
+ result_type(mpq_class const& q) :
+ len(mpz_size(q.get_num_mpz_t())+
+ mpz_size(q.get_den_mpz_t())-1) {}
+ friend bool operator<(result_type x, result_type y) {
+ // 0 is the worst possible pivot
+ if (x.len == 0) return y.len > 0;
+ if (y.len == 0) return false;
+ // Prefer a pivot with a small representation
+ return x.len > y.len;
+ }
+ friend bool operator==(result_type x, result_type y) {
+ // Only used to test if the score is 0
+ return x.len == y.len;
+ }
+ };
+ result_type operator()(mpq_class const& x) const { return x; }
+ };
+ }
+}
+\endcode
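Under the same assumptions, an exact LU solve on rationals could then look like this minimal sketch (matrix values arbitrary):
\code
#include <Eigen/LU>
#include <iostream>
// assumes the gmpxx/boost includes and the specializations shown above are in scope

int main()
{
  typedef Eigen::Matrix<mpq_class, Eigen::Dynamic, Eigen::Dynamic> MatrixXq;
  typedef Eigen::Matrix<mpq_class, Eigen::Dynamic, 1>              VectorXq;

  MatrixXq A(2,2);
  A << mpq_class(1,3), mpq_class(2,3),
       mpq_class(3,5), mpq_class(4,5);
  VectorXq b(2);
  b << 1, 2;

  VectorXq x = A.fullPivLu().solve(b);   // exact arithmetic; pivoting goes through the score above
  std::cout << x(0) << " " << x(1) << "\n";
}
\endcode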
+
+*/
+
+}
diff --git a/doc/CustomizingEigen_InheritingMatrix.dox b/doc/CustomizingEigen_InheritingMatrix.dox
new file mode 100644
index 000000000..b21e55433
--- /dev/null
+++ b/doc/CustomizingEigen_InheritingMatrix.dox
@@ -0,0 +1,34 @@
+namespace Eigen {
+
+/** \page TopicCustomizing_InheritingMatrix Inheriting from Matrix
+
+Before inheriting from Matrix, be really, I mean REALLY, sure that using
+EIGEN_MATRIX_PLUGIN is not what you really want (see previous section).
+If you just need to add a few members to Matrix, the plugin mechanism is the way to go.
+
+An example of when you actually need to inherit Matrix is when you
+have several layers of inheritance such as
+MyVerySpecificVector1, MyVerySpecificVector2 -> MyVector1 -> Matrix and
+MyVerySpecificVector3, MyVerySpecificVector4 -> MyVector2 -> Matrix.
+
+In order for your object to work within the %Eigen framework, you need to
+define a few members in your inherited class.
+
+Here is a minimalistic example:
+
+\include CustomizingEigen_Inheritance.cpp
+
+Output: \verbinclude CustomizingEigen_Inheritance.out
+
+This is the kind of error you can get if you don't provide those methods:
+\verbatim
+error: no match for ‘operator=’ in ‘v = Eigen::operator*(
+const Eigen::MatrixBase<Eigen::Matrix<double, -0x000000001, 1, 0, -0x000000001, 1> >::Scalar&,
+const Eigen::MatrixBase<Eigen::Matrix<double, -0x000000001, 1> >::StorageBaseType&)
+(((const Eigen::MatrixBase<Eigen::Matrix<double, -0x000000001, 1> >::StorageBaseType&)
+((const Eigen::MatrixBase<Eigen::Matrix<double, -0x000000001, 1> >::StorageBaseType*)(& v))))’
+\endverbatim
+
+*/
+
+}
diff --git a/doc/CustomizingEigen_NullaryExpr.dox b/doc/CustomizingEigen_NullaryExpr.dox
new file mode 100644
index 000000000..37c8dcd89
--- /dev/null
+++ b/doc/CustomizingEigen_NullaryExpr.dox
@@ -0,0 +1,86 @@
+namespace Eigen {
+
+/** \page TopicCustomizing_NullaryExpr Matrix manipulation via nullary-expressions
+
+
+The main purpose of the class CwiseNullaryOp is to define \em procedural matrices such as constant or random matrices as returned by the Ones(), Zero(), Constant(), Identity() and Random() methods.
+Nevertheless, with some imagination it is possible to accomplish very sophisticated matrix manipulation with minimal effort, such that \ref TopicNewExpressionType "implementing a new expression" is rarely needed.
+
+\section NullaryExpr_Circulant Example 1: circulant matrix
+
+To explore these possibilities let us start with the \em circulant example of the \ref TopicNewExpressionType "implementing new expression" topic.
+Let us recall that a circulant matrix is a matrix where each column is the same as the
+column to the left, except that it is cyclically shifted downwards.
+For example, here is a 4-by-4 circulant matrix:
+\f[ \begin{bmatrix}
+ 1 & 8 & 4 & 2 \\
+ 2 & 1 & 8 & 4 \\
+ 4 & 2 & 1 & 8 \\
+ 8 & 4 & 2 & 1
+\end{bmatrix} \f]
+A circulant matrix is uniquely determined by its first column. We wish
+to write a function \c makeCirculant which, given the first column,
+returns an expression representing the circulant matrix.
+
+For this exercise, the return type of \c makeCirculant will be a CwiseNullaryOp that we need to instantiate with:
+1 - a proper \c circulant_functor storing the input vector and implementing the adequate coefficient accessor \c operator()(i,j)
+2 - a template instantiation of class Matrix conveying compile-time information such as the scalar type, sizes, and preferred storage layout.
+
+Calling \c ArgType the type of the input vector, we can construct the equivalent square Matrix type as follows:
+
+\snippet make_circulant2.cpp square
+
+This little helper structure will help us to implement our \c makeCirculant function as follows:
+
+\snippet make_circulant2.cpp makeCirculant
+
+As usual, our function takes as argument a \c MatrixBase (see this \ref TopicFunctionTakingEigenTypes "page" for more details).
+Then, the CwiseNullaryOp object is constructed through the DenseBase::NullaryExpr static method with the adequate runtime sizes.
+
+Then, we need to implement our \c circulant_functor, which is a straightforward exercise:
+
+\snippet make_circulant2.cpp circulant_func
+
+We are now all set to try our new feature:
+
+\snippet make_circulant2.cpp main
+
+
+If all the fragments are combined, the following output is produced,
+showing that the program works as expected:
+
+\include make_circulant2.out
+
+This implementation of \c makeCirculant is much simpler than \ref TopicNewExpressionType "defining a new expression" from scratch.
+
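For reference, here is a condensed, self-contained sketch of what the referenced snippets combine into (the full version lives in doc/examples/make_circulant2.cpp):
\code
#include <Eigen/Core>
#include <iostream>

// Functor returning the (i,j) coefficient of the circulant matrix built from 'vec'.
template<class ArgType>
class circulant_functor {
  const ArgType &m_vec;
public:
  circulant_functor(const ArgType& arg) : m_vec(arg) {}
  const typename ArgType::Scalar& operator() (Eigen::Index row, Eigen::Index col) const {
    Eigen::Index index = row - col;          // cyclic downward shift of column 0
    if (index < 0) index += m_vec.size();
    return m_vec(index);
  }
};

// Square matrix type matching the scalar type and (max) size of the input vector.
template<class ArgType>
struct circulant_helper {
  typedef Eigen::Matrix<typename ArgType::Scalar,
                        ArgType::SizeAtCompileTime, ArgType::SizeAtCompileTime,
                        Eigen::ColMajor,
                        ArgType::MaxSizeAtCompileTime, ArgType::MaxSizeAtCompileTime> MatrixType;
};

template<class ArgType>
Eigen::CwiseNullaryOp<circulant_functor<ArgType>, typename circulant_helper<ArgType>::MatrixType>
makeCirculant(const Eigen::MatrixBase<ArgType>& arg)
{
  typedef typename circulant_helper<ArgType>::MatrixType MatrixType;
  return MatrixType::NullaryExpr(arg.size(), arg.size(), circulant_functor<ArgType>(arg.derived()));
}

int main()
{
  Eigen::VectorXi vec(4);
  vec << 1, 2, 4, 8;
  std::cout << makeCirculant(vec) << std::endl;
}
\endcode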
+
+\section NullaryExpr_Indexing Example 2: indexing rows and columns
+
+The goal here is to mimic MATLAB's ability to index a matrix through two vectors of indices referencing the rows and columns to be picked, respectively, like this:
+
+\snippet nullary_indexing.out main1
+
+To this end, let us first write a nullary-functor storing references to the input matrix and to the two arrays of indices, and implementing the required \c operator()(i,j):
+
+\snippet nullary_indexing.cpp functor
+
+Then, let's create an \c indexing(A,rows,cols) function creating the nullary expression:
+
+\snippet nullary_indexing.cpp function
+
+Finally, here is an example of how this function can be used:
+
+\snippet nullary_indexing.cpp main1
+
+This straightforward implementation is already quite powerful as the row or column index arrays can also be expressions to perform offsetting, modulo, striding, reverse, etc.
+
+\snippet nullary_indexing.cpp main2
+
+and the output is:
+
+\snippet nullary_indexing.out main2
+
+*/
+
+}
+
diff --git a/doc/CustomizingEigen_Plugins.dox b/doc/CustomizingEigen_Plugins.dox
new file mode 100644
index 000000000..d88f2409b
--- /dev/null
+++ b/doc/CustomizingEigen_Plugins.dox
@@ -0,0 +1,69 @@
+namespace Eigen {
+
+/** \page TopicCustomizing_Plugins Extending MatrixBase (and other classes)
+
+In this section we will see how to add custom methods to MatrixBase. Since all expressions and matrix types inherit MatrixBase, adding a method to MatrixBase makes it immediately available to all expressions! A typical use case is, for instance, to make Eigen compatible with another API.
+
+You certainly know that in C++ it is not possible to add methods to an existing class. So how is that possible? The trick is to include, in the declaration of MatrixBase, a file defined by the preprocessor token \c EIGEN_MATRIXBASE_PLUGIN:
+\code
+class MatrixBase {
+ // ...
+ #ifdef EIGEN_MATRIXBASE_PLUGIN
+ #include EIGEN_MATRIXBASE_PLUGIN
+ #endif
+};
+\endcode
+Therefore, to extend MatrixBase with your own methods, you just have to create a file with your method declarations and define EIGEN_MATRIXBASE_PLUGIN before you include any Eigen header file.
+
+You can extend many of the other classes used in Eigen by defining similarly named preprocessor symbols. For instance, define \c EIGEN_ARRAYBASE_PLUGIN if you want to extend the ArrayBase class. A full list of classes that can be extended in this way and the corresponding preprocessor symbols can be found on our page \ref TopicPreprocessorDirectives.
+
+Here is an example of an extension file for adding methods to MatrixBase: \n
+\b MatrixBaseAddons.h
+\code
+inline Scalar at(uint i, uint j) const { return this->operator()(i,j); }
+inline Scalar& at(uint i, uint j) { return this->operator()(i,j); }
+inline Scalar at(uint i) const { return this->operator[](i); }
+inline Scalar& at(uint i) { return this->operator[](i); }
+
+inline RealScalar squaredLength() const { return squaredNorm(); }
+inline RealScalar length() const { return norm(); }
+inline RealScalar invLength(void) const { return fast_inv_sqrt(squaredNorm()); }
+
+template<typename OtherDerived>
+inline Scalar squaredDistanceTo(const MatrixBase<OtherDerived>& other) const
+{ return (derived() - other.derived()).squaredNorm(); }
+
+template<typename OtherDerived>
+inline RealScalar distanceTo(const MatrixBase<OtherDerived>& other) const
+{ return internal::sqrt(derived().squaredDistanceTo(other)); }
+
+inline void scaleTo(RealScalar l) { RealScalar vl = norm(); if (vl>1e-9) derived() *= (l/vl); }
+
+inline Transpose<Derived> transposed() {return this->transpose();}
+inline const Transpose<Derived> transposed() const {return this->transpose();}
+
+inline uint minComponentId(void) const { int i; this->minCoeff(&i); return i; }
+inline uint maxComponentId(void) const { int i; this->maxCoeff(&i); return i; }
+
+template<typename OtherDerived>
+void makeFloor(const MatrixBase<OtherDerived>& other) { derived() = derived().cwiseMin(other.derived()); }
+template<typename OtherDerived>
+void makeCeil(const MatrixBase<OtherDerived>& other) { derived() = derived().cwiseMax(other.derived()); }
+
+const CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const Derived, const ConstantReturnType>
+operator+(const Scalar& scalar) const
+{ return CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const Derived, const ConstantReturnType>(derived(), Constant(rows(),cols(),scalar)); }
+
+friend const CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const ConstantReturnType, Derived>
+operator+(const Scalar& scalar, const MatrixBase<Derived>& mat)
+{ return CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const ConstantReturnType, Derived>(Constant(rows(),cols(),scalar), mat.derived()); }
+\endcode
+
+Then one can add the following declaration in the config.h or whatever prerequisite header file of the project:
+\code
+#define EIGEN_MATRIXBASE_PLUGIN "MatrixBaseAddons.h"
+\endcode
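A hypothetical end-to-end use of such a plugin file might look as follows (assuming the \c MatrixBaseAddons.h sketched above is on the include path):
\code
#define EIGEN_MATRIXBASE_PLUGIN "MatrixBaseAddons.h"  // must come before any Eigen header
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector3d v(1.0, 2.0, 3.0);
  std::cout << v.at(1)    << "\n";   // accessor added through the plugin
  std::cout << v.length() << "\n";   // alias for norm(), also from the plugin
}
\endcode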
+
+*/
+
+}
diff --git a/doc/DenseDecompositionBenchmark.dox b/doc/DenseDecompositionBenchmark.dox
new file mode 100644
index 000000000..7be9c70cd
--- /dev/null
+++ b/doc/DenseDecompositionBenchmark.dox
@@ -0,0 +1,42 @@
+namespace Eigen {
+
+/** \eigenManualPage DenseDecompositionBenchmark Benchmark of dense decompositions
+
+This page presents a speed comparison of the dense matrix decompositions offered by %Eigen for a wide range of square matrices and overconstrained problems.
+
+For a more general overview on the features and numerical robustness of linear solvers and decompositions, check this \link TopicLinearAlgebraDecompositions table \endlink.
+
+This benchmark has been run on a laptop equipped with an Intel core i7 \@ 2.6 GHz, and compiled with clang with \b AVX and \b FMA instruction sets enabled but without multi-threading.
+It uses \b single \b precision \b float numbers. For double, you can get a good estimate by multiplying the timings by a factor of 2.
+
+The square matrices are symmetric, and for the overconstrained matrices, the reported timings include the cost to compute the symmetric covariance matrix \f$ A^T A \f$ for the first four solvers based on Cholesky and LU, as denoted by the \b * symbol (top-right corner part of the table).
+Timings are in \b milliseconds, and factors are relative to the LLT decomposition which is the fastest but also the least general and robust.
+
+<table class="manual">
+<tr><th>solver/size</th>
+ <th>8x8</th> <th>100x100</th> <th>1000x1000</th> <th>4000x4000</th> <th>10000x8</th> <th>10000x100</th> <th>10000x1000</th> <th>10000x4000</th></tr>
+<tr><td>LLT</td><td>0.05</td><td>0.42</td><td>5.83</td><td>374.55</td><td>6.79 <sup><a href="#note_ls">*</a></sup></td><td>30.15 <sup><a href="#note_ls">*</a></sup></td><td>236.34 <sup><a href="#note_ls">*</a></sup></td><td>3847.17 <sup><a href="#note_ls">*</a></sup></td></tr>
+<tr class="alt"><td>LDLT</td><td>0.07 (x1.3)</td><td>0.65 (x1.5)</td><td>26.86 (x4.6)</td><td>2361.18 (x6.3)</td><td>6.81 (x1) <sup><a href="#note_ls">*</a></sup></td><td>31.91 (x1.1) <sup><a href="#note_ls">*</a></sup></td><td>252.61 (x1.1) <sup><a href="#note_ls">*</a></sup></td><td>5807.66 (x1.5) <sup><a href="#note_ls">*</a></sup></td></tr>
+<tr><td>PartialPivLU</td><td>0.08 (x1.5)</td><td>0.69 (x1.6)</td><td>15.63 (x2.7)</td><td>709.32 (x1.9)</td><td>6.81 (x1) <sup><a href="#note_ls">*</a></sup></td><td>31.32 (x1) <sup><a href="#note_ls">*</a></sup></td><td>241.68 (x1) <sup><a href="#note_ls">*</a></sup></td><td>4270.48 (x1.1) <sup><a href="#note_ls">*</a></sup></td></tr>
+<tr class="alt"><td>FullPivLU</td><td>0.1 (x1.9)</td><td>4.48 (x10.6)</td><td>281.33 (x48.2)</td><td>-</td><td>6.83 (x1) <sup><a href="#note_ls">*</a></sup></td><td>32.67 (x1.1) <sup><a href="#note_ls">*</a></sup></td><td>498.25 (x2.1) <sup><a href="#note_ls">*</a></sup></td><td>-</td></tr>
+<tr><td>HouseholderQR</td><td>0.19 (x3.5)</td><td>2.18 (x5.2)</td><td>23.42 (x4)</td><td>1337.52 (x3.6)</td><td>34.26 (x5)</td><td>129.01 (x4.3)</td><td>377.37 (x1.6)</td><td>4839.1 (x1.3)</td></tr>
+<tr class="alt"><td>ColPivHouseholderQR</td><td>0.23 (x4.3)</td><td>2.23 (x5.3)</td><td>103.34 (x17.7)</td><td>9987.16 (x26.7)</td><td>36.05 (x5.3)</td><td>163.18 (x5.4)</td><td>2354.08 (x10)</td><td>37860.5 (x9.8)</td></tr>
+<tr><td>CompleteOrthogonalDecomposition</td><td>0.23 (x4.3)</td><td>2.22 (x5.2)</td><td>99.44 (x17.1)</td><td>10555.3 (x28.2)</td><td>35.75 (x5.3)</td><td>169.39 (x5.6)</td><td>2150.56 (x9.1)</td><td>36981.8 (x9.6)</td></tr>
+<tr class="alt"><td>FullPivHouseholderQR</td><td>0.23 (x4.3)</td><td>4.64 (x11)</td><td>289.1 (x49.6)</td><td>-</td><td>69.38 (x10.2)</td><td>446.73 (x14.8)</td><td>4852.12 (x20.5)</td><td>-</td></tr>
+<tr><td>JacobiSVD</td><td>1.01 (x18.6)</td><td>71.43 (x168.4)</td><td>-</td><td>-</td><td>113.81 (x16.7)</td><td>1179.66 (x39.1)</td><td>-</td><td>-</td></tr>
+<tr class="alt"><td>BDCSVD</td><td>1.07 (x19.7)</td><td>21.83 (x51.5)</td><td>331.77 (x56.9)</td><td>18587.9 (x49.6)</td><td>110.53 (x16.3)</td><td>397.67 (x13.2)</td><td>2975 (x12.6)</td><td>48593.2 (x12.6)</td></tr>
+</table>
+
+<a name="note_ls">\b *: </a> These decompositions do not support direct least-squares solving for over-constrained problems, and the reported timings include the cost to form the symmetric covariance matrix \f$ A^T A \f$.
+
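Concretely, the \b * entries time the normal-equations route, whereas the QR and SVD solvers handle the over-constrained system directly; a minimal sketch of both (sizes match one of the benchmark columns):
\code
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXf A = Eigen::MatrixXf::Random(10000, 100);
  Eigen::VectorXf b = Eigen::VectorXf::Random(10000);

  // Cholesky/LU route: the timing includes forming the covariance matrix A^T A
  Eigen::VectorXf x1 = (A.transpose() * A).llt().solve(A.transpose() * b);

  // QR route: solves the over-constrained system directly
  Eigen::VectorXf x2 = A.householderQr().solve(b);
}
\endcode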
+\b Observations:
+ + LLT is always the fastest solver.
+ + For largely over-constrained problems, the cost of Cholesky/LU decompositions is dominated by the computation of the symmetric covariance matrix.
+ + For large problem sizes, only the decompositions implementing a cache-friendly blocking strategy scale well. Those include LLT, PartialPivLU, HouseholderQR, and BDCSVD. This explains why, for a 4k x 4k matrix, HouseholderQR is faster than LDLT. In the future, LDLT and ColPivHouseholderQR will also implement blocking strategies.
+ + CompleteOrthogonalDecomposition is based on ColPivHouseholderQR and they thus achieve the same level of performance.
+
+The above table has been generated by the <a href="https://bitbucket.org/eigen/eigen/raw/default/bench/dense_solvers.cpp">bench/dense_solvers.cpp</a> file; feel free to hack it to generate a table matching your hardware, compiler, and favorite problem sizes.
+
+*/
+
+}
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 0a43c7c4e..e9b116d28 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -125,7 +125,7 @@ ALWAYS_DETAILED_SEC = NO
# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
-INLINE_INHERITED_MEMB = YES
+INLINE_INHERITED_MEMB = NO
# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
# path before files name in the file list and in the header files. If set
@@ -216,6 +216,7 @@ ALIASES = "only_for_vectors=This is only for vectors (either row-
"lu_module=This is defined in the %LU module. \code #include <Eigen/LU> \endcode" \
"qr_module=This is defined in the %QR module. \code #include <Eigen/QR> \endcode" \
"svd_module=This is defined in the %SVD module. \code #include <Eigen/SVD> \endcode" \
+ "specialfunctions_module=This is defined in the \b unsupported SpecialFunctions module. \code #include <Eigen/SpecialFunctions> \endcode" \
"label=\bug" \
"matrixworld=<a href='#matrixonly' style='color:green;text-decoration: none;'>*</a>" \
"arrayworld=<a href='#arrayonly' style='color:blue;text-decoration: none;'>*</a>" \
@@ -225,7 +226,10 @@ ALIASES = "only_for_vectors=This is only for vectors (either row-
"note_try_to_help_rvo=This function returns the result by value. In order to make that efficient, it is implemented as just a return statement using a special constructor, hopefully allowing the compiler to perform a RVO (return value optimization)." \
"nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\"" \
"implsparsesolverconcept=This class follows the \link TutorialSparseSolverConcept sparse solver concept \endlink." \
- "blank= "
+ "blank= " \
+ "cpp11=<span class='cpp11'>[c++11]</span>" \
+ "cpp14=<span class='cpp14'>[c++14]</span>" \
+ "cpp17=<span class='cpp17'>[c++17]</span>"
ALIASES += "eigenAutoToc= "
@@ -1587,7 +1591,8 @@ PREDEFINED = EIGEN_EMPTY_STRUCT \
EIGEN_STRONG_INLINE=inline \
EIGEN_DEVICE_FUNC= \
"EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR)=template<typename OtherDerived> const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> METHOD(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const;" \
- "EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS)=CwiseBinaryOp<internal::scalar_product_op<typename LHS::Scalar, typename RHS::Scalar >, const LHS, const RHS>"
+ "EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS)=CwiseBinaryOp<internal::scalar_product_op<typename LHS::Scalar, typename RHS::Scalar >, const LHS, const RHS>"\
+ DOXCOMMA=,
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be expanded.
@@ -1602,7 +1607,15 @@ EXPAND_AS_DEFINED = EIGEN_MAKE_TYPEDEFS \
EIGEN_CWISE_BINOP_RETURN_TYPE \
EIGEN_CURRENT_STORAGE_BASE_CLASS \
EIGEN_MATHFUNC_IMPL \
- _EIGEN_GENERIC_PUBLIC_INTERFACE
+ _EIGEN_GENERIC_PUBLIC_INTERFACE \
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY \
+ EIGEN_EMPTY \
+ EIGEN_EULER_ANGLES_TYPEDEFS \
+ EIGEN_EULER_ANGLES_SINGLE_TYPEDEF \
+ EIGEN_EULER_SYSTEM_TYPEDEF \
+ EIGEN_DOC_UNARY_ADDONS \
+ EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL \
+ EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF
# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
# doxygen's preprocessor will remove all references to function-like macros
diff --git a/doc/InplaceDecomposition.dox b/doc/InplaceDecomposition.dox
new file mode 100644
index 000000000..cb1c6d413
--- /dev/null
+++ b/doc/InplaceDecomposition.dox
@@ -0,0 +1,115 @@
+namespace Eigen {
+
+/** \eigenManualPage InplaceDecomposition Inplace matrix decompositions
+
+Starting from %Eigen 3.3, the LU, Cholesky, and QR decompositions can operate \em inplace, that is, directly within the given input matrix.
+This feature is especially useful when dealing with huge matrices, and/or when the available memory is very limited (embedded systems).
+
+To this end, the respective decomposition class must be instantiated with a Ref<> matrix type, and the decomposition object must be constructed with the input matrix as argument. As an example, let us consider an inplace LU decomposition with partial pivoting.
+
+Let's start with the basic inclusions, and declaration of a 2x2 matrix \c A:
+
+<table class="example">
+<tr><th>code</th><th>output</th></tr>
+<tr>
+ <td>\snippet TutorialInplaceLU.cpp init
+ </td>
+ <td>\snippet TutorialInplaceLU.out init
+ </td>
+</tr>
+</table>
+
+No surprise here! Then, let's declare our inplace LU object \c lu, and check the content of the matrix \c A:
+
+<table class="example">
+<tr>
+ <td>\snippet TutorialInplaceLU.cpp declaration
+ </td>
+ <td>\snippet TutorialInplaceLU.out declaration
+ </td>
+</tr>
+</table>
+
+Here, the \c lu object computes and stores the \c L and \c U factors within the memory held by the matrix \c A.
+The coefficients of \c A have thus been destroyed during the factorization, and replaced by the L and U factors as one can verify:
+
+<table class="example">
+<tr>
+ <td>\snippet TutorialInplaceLU.cpp matrixLU
+ </td>
+ <td>\snippet TutorialInplaceLU.out matrixLU
+ </td>
+</tr>
+</table>
+
+Then, one can use the \c lu object as usual, for instance to solve the Ax=b problem:
+<table class="example">
+<tr>
+ <td>\snippet TutorialInplaceLU.cpp solve
+ </td>
+ <td>\snippet TutorialInplaceLU.out solve
+ </td>
+</tr>
+</table>
+
+Here, since the content of the original matrix \c A has been lost, we had to declare a new matrix \c A0 to verify the result.
+
+Since the memory is shared between \c A and \c lu, modifying the matrix \c A will make \c lu invalid.
+This can easily be verified by modifying the content of \c A and trying to solve the initial problem again:
+
+<table class="example">
+<tr>
+ <td>\snippet TutorialInplaceLU.cpp modifyA
+ </td>
+ <td>\snippet TutorialInplaceLU.out modifyA
+ </td>
+</tr>
+</table>
+
+Note that there is no shared pointer under the hood: it is the \b responsibility \b of \b the \b user to keep the input matrix \c A alive for as long as \c lu is being used.
+
+If one wants to update the factorization with the modified A, one has to call the compute method as usual:
+<table class="example">
+<tr>
+ <td>\snippet TutorialInplaceLU.cpp recompute
+ </td>
+ <td>\snippet TutorialInplaceLU.out recompute
+ </td>
+</tr>
+</table>
+
+Note that calling compute does not change the memory which is referenced by the \c lu object. Therefore, if the compute method is called with another matrix \c A1 different from \c A, then the content of \c A1 won't be modified. It is still the content of \c A that will be used to store the L and U factors of the matrix \c A1.
+This can easily be verified as follows:
+<table class="example">
+<tr>
+ <td>\snippet TutorialInplaceLU.cpp recompute_bis0
+ </td>
+ <td>\snippet TutorialInplaceLU.out recompute_bis0
+ </td>
+</tr>
+</table>
+The matrix \c A1 is unchanged, and one can thus solve A1*x=b, and directly check the residual without any copy of \c A1:
+<table class="example">
+<tr>
+ <td>\snippet TutorialInplaceLU.cpp recompute_bis1
+ </td>
+ <td>\snippet TutorialInplaceLU.out recompute_bis1
+ </td>
+</tr>
+</table>
+
+
+Here is the list of matrix decompositions supporting this inplace mechanism:
+
+- class LLT
+- class LDLT
+- class PartialPivLU
+- class FullPivLU
+- class HouseholderQR
+- class ColPivHouseholderQR
+- class FullPivHouseholderQR
+- class CompleteOrthogonalDecomposition
+
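+For instance, a minimal sketch of an inplace Cholesky factorization could look like (assuming \c A is a symmetric positive definite \c MatrixXd and \c b a vector of matching size):
+\code
+MatrixXd A = ...;            // symmetric positive definite
+VectorXd b = ...;
+LLT<Ref<MatrixXd> > llt(A);  // the L factor overwrites the memory of A
+VectorXd x = llt.solve(b);   // then use llt as any other LLT object
+\endcode
+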
+*/
+
+} \ No newline at end of file
diff --git a/doc/Manual.dox b/doc/Manual.dox
index 70aaa9a42..a08609ad7 100644
--- a/doc/Manual.dox
+++ b/doc/Manual.dox
@@ -3,21 +3,31 @@
namespace Eigen {
+/** \page UserManual_CustomizingEigen Extending/Customizing Eigen
+ %Eigen can be extended in several ways, for instance, by defining global methods, by inserting custom methods within %Eigen's main classes through the \ref TopicCustomizing_Plugins "plugin" mechanism, by adding support for \ref TopicCustomizing_CustomScalar "custom scalar types", etc. See below for the respective sub-topics.
+ - \subpage TopicCustomizing_Plugins
+ - \subpage TopicCustomizing_InheritingMatrix
+ - \subpage TopicCustomizing_CustomScalar
+ - \subpage TopicCustomizing_NullaryExpr
+ - \subpage TopicNewExpressionType
+ \sa \ref TopicPreprocessorDirectives
+*/
+
+
/** \page UserManual_Generalities General topics
- \subpage Eigen2ToEigen3
- \subpage TopicFunctionTakingEigenTypes
- \subpage TopicPreprocessorDirectives
- \subpage TopicAssertions
- - \subpage TopicCustomizingEigen
- \subpage TopicMultiThreading
+ - \subpage TopicUsingBlasLapack
- \subpage TopicUsingIntelMKL
- \subpage TopicCUDA
- \subpage TopicPitfalls
- \subpage TopicTemplateKeyword
- - \subpage TopicNewExpressionType
- \subpage UserManual_UnderstandingEigen
*/
-
+
/** \page UserManual_UnderstandingEigen Understanding Eigen
- \subpage TopicInsideEigenExample
- \subpage TopicClassHierarchy
@@ -90,6 +100,9 @@ namespace Eigen {
/** \addtogroup Householder_Module
\ingroup DenseMatrixManipulation_Reference */
+/** \addtogroup CoeffwiseMathFunctions
+ \ingroup DenseMatrixManipulation_chapter */
+
/** \addtogroup QuickRefPage
\ingroup DenseMatrixManipulation_chapter */
@@ -103,6 +116,10 @@ namespace Eigen {
\ingroup DenseLinearSolvers_chapter */
/** \addtogroup LeastSquares
\ingroup DenseLinearSolvers_chapter */
+/** \addtogroup InplaceDecomposition
+ \ingroup DenseLinearSolvers_chapter */
+/** \addtogroup DenseDecompositionBenchmark
+ \ingroup DenseLinearSolvers_chapter */
/** \addtogroup DenseLinearSolvers_Reference
\ingroup DenseLinearSolvers_chapter */
diff --git a/doc/MatrixfreeSolverExample.dox b/doc/MatrixfreeSolverExample.dox
index 000cb0bbe..3efa292b5 100644
--- a/doc/MatrixfreeSolverExample.dox
+++ b/doc/MatrixfreeSolverExample.dox
@@ -6,12 +6,12 @@ namespace Eigen {
\eigenManualPage MatrixfreeSolverExample Matrix-free solvers
Iterative solvers such as ConjugateGradient and BiCGSTAB can be used in a matrix free context. To this end, user must provide a wrapper class inheriting EigenBase<> and implementing the following methods:
- - Index rows() and Index cols(): returns number of rows and columns respectively
- - operator* with and %Eigen dense column vector (its actual implementation goes in a specialization of the internal::generic_product_impl class)
+ - \c Index \c rows() and \c Index \c cols(): return the number of rows and columns respectively
+ - \c operator* with your type and an %Eigen dense column vector (its actual implementation goes in a specialization of the internal::generic_product_impl class)
-Eigen::internal::traits<> must also be specialized for the wrapper type.
+\c Eigen::internal::traits<> must also be specialized for the wrapper type.
-Here is a complete example wrapping a Eigen::SparseMatrix:
+Here is a complete example wrapping an Eigen::SparseMatrix:
\include matrixfree_cg.cpp
Output: \verbinclude matrixfree_cg.out
diff --git a/doc/NewExpressionType.dox b/doc/NewExpressionType.dox
index ad8b7f86b..c2f243312 100644
--- a/doc/NewExpressionType.dox
+++ b/doc/NewExpressionType.dox
@@ -2,6 +2,12 @@ namespace Eigen {
/** \page TopicNewExpressionType Adding a new expression type
+\warning
+Disclaimer: this page is tailored to very advanced users who are not afraid of dealing with some of %Eigen's internal aspects.
+In most cases, a custom expression can be avoided by either using custom \ref MatrixBase::unaryExpr "unary" or \ref MatrixBase::binaryExpr "binary" functors,
+while extremely complex matrix manipulations can be achieved by nullary functors as described in the \ref TopicCustomizing_NullaryExpr "previous page".
+
This page describes with the help of an example how to implement a new
light-weight expression type in %Eigen. This consists of three parts:
the expression type itself, a traits class containing compile-time
@@ -130,7 +136,7 @@ function can be called.
If all the fragments are combined, the following output is produced,
showing that the program works as expected:
-\verbinclude make_circulant.out
+\include make_circulant.out
*/
}
diff --git a/doc/Overview.dox b/doc/Overview.dox
index 9ab96233a..dbb49bd21 100644
--- a/doc/Overview.dox
+++ b/doc/Overview.dox
@@ -17,7 +17,9 @@ You're a MatLab user? There is also a <a href="AsciiQuickReference.txt">short AS
The \b main \b documentation is organized into \em chapters covering different domains of features.
They are themselves composed of \em user \em manual pages describing the different features in a comprehensive way, and \em reference pages that gives you access to the API documentation through the related Eigen's \em modules and \em classes.
-Under the \subpage UserManual_Generalities section, you will find documentation on more general topics such as preprocessor directives, controlling assertions, multi-threading, MKL support, some Eigen's internal insights, and much more...
+Under the \subpage UserManual_CustomizingEigen section, you will find discussions and examples on extending %Eigen's features and supporting custom scalar types.
+
+Under the \subpage UserManual_Generalities section, you will find documentation on more general topics such as preprocessor directives, controlling assertions, multi-threading, MKL support, some of %Eigen's internal insights, and much more...
Finally, do not miss the search engine, useful to quickly get to the documentation of a given class or function.
diff --git a/doc/PreprocessorDirectives.dox b/doc/PreprocessorDirectives.dox
index 14e84bc20..2f9c4c370 100644
--- a/doc/PreprocessorDirectives.dox
+++ b/doc/PreprocessorDirectives.dox
@@ -49,6 +49,36 @@ are doing.
the correct size. Not defined by default.
+\section TopicPreprocessorDirectivesCppVersion C++ standard features
+
+By default, %Eigen strives to automatically detect and enable language features at compile-time based on
+the information provided by the compiler.
+
+ - \b EIGEN_MAX_CPP_VER - disables usage of C++ features requiring a version greater than EIGEN_MAX_CPP_VER.
+ Possible values are: 03, 11, 14, 17, etc. If not defined (the default), %Eigen enables all features supported
+ by the compiler.
+
+Individual features can be explicitly enabled or disabled by defining the following tokens to 1 or 0 respectively.
+For instance, one might limit the C++ version to C++03 by defining EIGEN_MAX_CPP_VER=03, but still enable C99 math
+functions by defining EIGEN_HAS_C99_MATH=1.
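+
+In code, that particular configuration could look as follows (a minimal sketch; the macros must be defined before including any %Eigen header):
+\code
+#define EIGEN_MAX_CPP_VER 03
+#define EIGEN_HAS_C99_MATH 1
+#include <Eigen/Dense>
+\endcode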
+
+ - \b EIGEN_HAS_C99_MATH - controls the usage of C99 math functions such as erf, erfc, lgamma, etc.
+ Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
+ - \b EIGEN_HAS_CXX11_MATH - controls the implementation of some functions such as round, log1p, isinf, isnan, etc.
+ Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
+ - \b EIGEN_HAS_RVALUE_REFERENCES - defines whether rvalue references are supported
+ Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
+ - \b EIGEN_HAS_STD_RESULT_OF - defines whether std::result_of is supported
+ Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
+ - \b EIGEN_HAS_VARIADIC_TEMPLATES - defines whether variadic templates are supported
+ Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
+ - \b EIGEN_HAS_CONSTEXPR - defines whether relaxed const expressions are supported
+ Automatic detection disabled if EIGEN_MAX_CPP_VER<14.
+ - \b EIGEN_HAS_CXX11_CONTAINERS - defines whether the STL containers follow the C++11 specification
+ Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
+ - \b EIGEN_HAS_CXX11_NOEXCEPT - defines whether noexcept is supported
+ Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
+
\section TopicPreprocessorDirectivesAssertions Assertions
The %Eigen library contains many assertions to guard against programming errors, both at compile time and at
@@ -78,6 +108,9 @@ run time. However, these assertions do cost time and can thus be turned off.
See \ref TopicMultiThreading for details.
- \b EIGEN_DONT_VECTORIZE - disables explicit vectorization when defined. Not defined by default, unless
alignment is disabled by %Eigen's platform test or the user defining \c EIGEN_DONT_ALIGN.
+ - \b EIGEN_UNALIGNED_VECTORIZE - disables/enables vectorization with unaligned stores. Default is 1 (enabled).
+   If set to 0 (disabled), then expressions for which the destination cannot be aligned are not vectorized (e.g., unaligned
+   small fixed-size vectors or matrices).
- \b EIGEN_FAST_MATH - enables some optimizations which might affect the accuracy of the result. This currently
enables the SSE vectorization of sin() and cos(), and speedups sqrt() for single precision. Defined to 1 by default.
Define it to 0 to disable.
diff --git a/doc/SparseLinearSystems.dox b/doc/SparseLinearSystems.dox
index ee4f53a4e..fc33b93e7 100644
--- a/doc/SparseLinearSystems.dox
+++ b/doc/SparseLinearSystems.dox
@@ -76,6 +76,9 @@ They are summarized in the following tables:
<tr><td>SPQR</td><td>\link SPQRSupport_Module SPQRSupport \endlink </td> <td> QR factorization </td>
<td> Any, rectangular</td><td>fill-in reducing, multithreaded, fast dense algebra</td>
<td> requires the <a href="http://www.suitesparse.com">SuiteSparse</a> package, \b GPL </td><td>recommended for linear least-squares problems, has a rank-revealing feature</tr>
+<tr><td>PardisoLLT \n PardisoLDLT \n PardisoLU</td><td>\link PardisoSupport_Module PardisoSupport \endlink</td><td>Direct LLt, LDLt, LU factorizations</td><td>SPD \n SPD \n Square</td><td>Fill-in reducing, Leverage fast dense algebra, Multithreading</td>
+ <td>Requires the <a href="http://eigen.tuxfamily.org/Counter/redirect_to_mkl.php">Intel MKL</a> package, \b Proprietary </td>
+ <td>optimized for tough problem patterns, see also \link TopicUsingIntelMKL using MKL with Eigen \endlink</td></tr>
</table>
Here \c SPD means symmetric positive definite.
diff --git a/doc/SparseQuickReference.dox b/doc/SparseQuickReference.dox
index e0a30edcc..a25622e80 100644
--- a/doc/SparseQuickReference.dox
+++ b/doc/SparseQuickReference.dox
@@ -206,7 +206,7 @@ See \ref TutorialSparse_SubMatrices and below for read-write sub-matrices.
sm1.innerVectors(start, size); // RW
sm1.leftCols(size); // RW
sm2.rightCols(size); // RO because sm2 is row-major
- sm1.middleRows(start, numRows); // RO becasue sm1 is column-major
+ sm1.middleRows(start, numRows); // RO because sm1 is column-major
sm1.middleCols(start, numCols); // RW
sm1.col(j); // RW
\endcode
@@ -253,6 +253,20 @@ If the matrix is not in compressed form, makeCompressed() should be called befor
Note that these functions are mostly provided for interoperability purposes with external libraries.\n
A better access to the values of the matrix is done by using the InnerIterator class as described in \link TutorialSparse the Tutorial Sparse \endlink section</td>
</tr>
+<tr class="alt"><td colspan="2">Mapping external buffers</td></tr>
+<tr class="alt">
+<td>
+\code
+int outerIndexPtr[cols+1];
+int innerIndices[nnz];
+double values[nnz];
+Map<SparseMatrix<double> > sm1(rows,cols,nnz,outerIndexPtr, // read-write
+ innerIndices,values);
+Map<const SparseMatrix<double> > sm2(...); // read-only
+\endcode
+</td>
+<td>As for dense matrices, the Map<SparseMatrixType> class can be used to view external buffers as an %Eigen SparseMatrix object.</td>
+</tr>
</table>
*/
}
diff --git a/doc/TopicAssertions.dox b/doc/TopicAssertions.dox
index 4ead40174..c8b4d84f2 100644
--- a/doc/TopicAssertions.dox
+++ b/doc/TopicAssertions.dox
@@ -16,7 +16,7 @@ Both eigen_assert and eigen_plain_assert are defined in Macros.h. Defining eigen
#include <stdexcept>
#undef eigen_assert
#define eigen_assert(x) \
- if (!x) { throw (std::runtime_error("Put your message here")); }
+ if (!(x)) { throw (std::runtime_error("Put your message here")); }
\endcode
\subsection DisableAssert Disabling assertions
diff --git a/doc/TopicLinearAlgebraDecompositions.dox b/doc/TopicLinearAlgebraDecompositions.dox
index 5bcff2c96..491470627 100644
--- a/doc/TopicLinearAlgebraDecompositions.dox
+++ b/doc/TopicLinearAlgebraDecompositions.dox
@@ -4,6 +4,7 @@ namespace Eigen {
This page presents a catalogue of the dense matrix decompositions offered by Eigen.
For an introduction on linear solvers and decompositions, check this \link TutorialLinearAlgebra page \endlink.
+To get an overview of the true relative speed of the different decomposition, check this \link DenseDecompositionBenchmark benchmark \endlink.
\section TopicLinAlgBigTable Catalogue of decompositions offered by Eigen
@@ -256,6 +257,7 @@ For an introduction on linear solvers and decompositions, check this \link Tutor
<dd></dd>
</dl>
+
*/
}
diff --git a/doc/UnalignedArrayAssert.dox b/doc/UnalignedArrayAssert.dox
index f0f84d25f..95d95a2d5 100644
--- a/doc/UnalignedArrayAssert.dox
+++ b/doc/UnalignedArrayAssert.dox
@@ -92,27 +92,28 @@ Note that here, Eigen::Quaternionf is only used as an example, more generally th
\section explanation General explanation of this assertion
-\ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen objects" must absolutely be created at 16-byte-aligned locations, otherwise SIMD instructions adressing them will crash.
+\ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen objects" must absolutely be created at 16-byte-aligned locations, otherwise SIMD instructions addressing them will crash.
Eigen normally takes care of these alignment issues for you, by setting an alignment attribute on them and by overloading their "operator new".
However there are a few corner cases where these alignment settings get overridden: they are the possible causes for this assertion.
-\section getrid I don't care about vectorization, how do I get rid of that stuff?
+\section getrid I don't care about optimal vectorization, how do I get rid of that stuff?
-Two possibilities:
+Three possibilities:
<ul>
- <li>Define EIGEN_DONT_ALIGN_STATICALLY. That disables all 128-bit static alignment code, while keeping 128-bit heap alignment. This has the effect of
- disabling vectorization for fixed-size objects (like Matrix4d) while keeping vectorization of dynamic-size objects
- (like MatrixXd). But do note that this breaks ABI compatibility with the default behavior of 128-bit static alignment.</li>
- <li>Or define both EIGEN_DONT_VECTORIZE and EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT. This keeps the
- 128-bit alignment code and thus preserves ABI compatibility, but completely disables vectorization.</li>
+  <li>Use the \c DontAlign option to Matrix, Array, Quaternion, etc. objects that give you trouble. This way Eigen won't try to align them, and thus won't assume any special alignment. On the downside, you will pay the cost of unaligned loads/stores for them, but on modern CPUs the overhead is either zero or marginal. See \link StructHavingEigenMembers_othersolutions here \endlink for an example, and the short sketch after this list.</li>
+ <li>Define \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_ALIGN_STATICALLY \endlink. That disables all 16-byte (and above) static alignment code, while keeping 16-byte (or above) heap alignment. This has the effect of
+ vectorizing fixed-size objects (like Matrix4d) through unaligned stores (as controlled by \link TopicPreprocessorDirectivesPerformance EIGEN_UNALIGNED_VECTORIZE \endlink), while keeping unchanged the vectorization of dynamic-size objects
+ (like MatrixXd). But do note that this breaks ABI compatibility with the default behavior of static alignment.</li>
+ <li>Or define both \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_VECTORIZE \endlink and EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT. This keeps the
+ 16-byte alignment code and thus preserves ABI compatibility, but completely disables vectorization.</li>
</ul>
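+
+As an illustration of the first option, a minimal sketch using \c DontAlign could be:
+\code
+Matrix<float,4,1,DontAlign> v;  // fixed-size vector without the 16-byte alignment requirement
+Quaternion<float,DontAlign> q;  // likewise for a quaternion member
+\endcode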
-If you want to know why defining EIGEN_DONT_VECTORIZE does not by itself disable 128-bit alignment and the assertion, here's the explanation:
+If you want to know why defining EIGEN_DONT_VECTORIZE does not by itself disable 16-byte alignment and the assertion, here's the explanation:
It doesn't disable the assertion, because otherwise code that runs fine without vectorization would suddenly crash when enabling vectorization.
-It doesn't disable 128bit alignment, because that would mean that vectorized and non-vectorized code are not mutually ABI-compatible. This ABI compatibility is very important, even for people who develop only an in-house application, as for instance one may want to have in the same application a vectorized path and a non-vectorized path.
+It doesn't disable 16-byte alignment, because that would mean that vectorized and non-vectorized code are not mutually ABI-compatible. This ABI compatibility is very important, even for people who develop only an in-house application, as for instance one may want to have in the same application a vectorized path and a non-vectorized path.
*/
diff --git a/doc/UsingBlasLapackBackends.dox b/doc/UsingBlasLapackBackends.dox
new file mode 100644
index 000000000..caa597122
--- /dev/null
+++ b/doc/UsingBlasLapackBackends.dox
@@ -0,0 +1,133 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+ Copyright (C) 2011-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Documentation on the use of BLAS/LAPACK libraries through Eigen
+ ********************************************************************************
+*/
+
+namespace Eigen {
+
+/** \page TopicUsingBlasLapack Using BLAS/LAPACK from %Eigen
+
+
+Since %Eigen version 3.3, any F77 compatible BLAS or LAPACK library can be used as a backend for dense matrix products and dense matrix decompositions.
+For instance, one can use <a href="http://eigen.tuxfamily.org/Counter/redirect_to_mkl.php">Intel® MKL</a>, Apple's Accelerate framework on OSX, <a href="http://www.openblas.net/">OpenBLAS</a>, <a href="http://www.netlib.org/lapack">Netlib LAPACK</a>, etc.
+
+Do not miss this \link TopicUsingIntelMKL page \endlink for further discussions on the specific use of Intel® MKL (which also covers VML, PARDISO, etc.).
+
+In order to use an external BLAS and/or LAPACK library, you must link your own application to the respective libraries and their dependencies.
+For LAPACK, you must also link to the standard <a href="http://www.netlib.org/lapack/lapacke.html">Lapacke</a> library, which is used as a convenient thin layer between %Eigen's C++ code and the LAPACK F77 interface. Then you must activate their usage by defining one or multiple of the following macros (\b before including any %Eigen header):
+
+\note For Mac users, in order to use the lapack version shipped with the Accelerate framework, you also need the lapacke library.
+Using <a href="https://www.macports.org/">MacPorts</a>, this is as easy as:
+\code
+sudo port install lapack
+\endcode
+and then use the following link flags: \c -framework \c Accelerate \c /opt/local/lib/lapack/liblapacke.dylib
+
+<table class="manual">
+<tr><td>\c EIGEN_USE_BLAS </td><td>Enables the use of external BLAS level 2 and 3 routines (compatible with any F77 BLAS interface)</td></tr>
+<tr class="alt"><td>\c EIGEN_USE_LAPACKE </td><td>Enables the use of external Lapack routines via the <a href="http://www.netlib.org/lapack/lapacke.html">Lapacke</a> C interface to Lapack (compatible with any F77 LAPACK interface)</td></tr>
+<tr><td>\c EIGEN_USE_LAPACKE_STRICT </td><td>Same as \c EIGEN_USE_LAPACKE but algorithms of lower numerical robustness are disabled. \n This currently concerns only JacobiSVD which otherwise would be replaced by \c gesvd that is less robust than Jacobi rotations.</td></tr>
+</table>
+
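+For instance, a hypothetical setup enabling both backends could look as follows (the exact link line depends on your BLAS/LAPACK distribution; OpenBLAS and the reference Lapacke are assumed here):
+\code
+// my_prog.cpp -- the macros must appear before any Eigen header
+#define EIGEN_USE_BLAS
+#define EIGEN_USE_LAPACKE
+#include <Eigen/Dense>
+// then compile and link with, e.g.:  g++ my_prog.cpp -lopenblas -llapacke
+\endcode
+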
+When doing so, a number of %Eigen's algorithms are silently substituted with calls to BLAS or LAPACK routines.
+These substitutions apply only for \b Dynamic \b or \b large enough objects with one of the following four standard scalar types: \c float, \c double, \c complex<float>, and \c complex<double>.
+Operations on other scalar types or mixing reals and complexes will continue to use the built-in algorithms.
+
+The breadth of %Eigen functionality that can be substituted is listed in the table below.
+<table class="manual">
+<tr><th>Functional domain</th><th>Code example</th><th>BLAS/LAPACK routines</th></tr>
+<tr><td>Matrix-matrix operations \n \c EIGEN_USE_BLAS </td><td>\code
+m1*m2.transpose();
+m1.selfadjointView<Lower>()*m2;
+m1*m2.triangularView<Upper>();
+m1.selfadjointView<Lower>().rankUpdate(m2,1.0);
+\endcode</td><td>\code
+?gemm
+?symm/?hemm
+?trmm
+dsyrk/ssyrk
+\endcode</td></tr>
+<tr class="alt"><td>Matrix-vector operations \n \c EIGEN_USE_BLAS </td><td>\code
+m1.adjoint()*b;
+m1.selfadjointView<Lower>()*b;
+m1.triangularView<Upper>()*b;
+\endcode</td><td>\code
+?gemv
+?symv/?hemv
+?trmv
+\endcode</td></tr>
+<tr><td>LU decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
+v1 = m1.lu().solve(v2);
+\endcode</td><td>\code
+?getrf
+\endcode</td></tr>
+<tr class="alt"><td>Cholesky decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
+v1 = m2.selfadjointView<Upper>().llt().solve(v2);
+\endcode</td><td>\code
+?potrf
+\endcode</td></tr>
+<tr><td>QR decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
+m1.householderQr();
+m1.colPivHouseholderQr();
+\endcode</td><td>\code
+?geqrf
+?geqp3
+\endcode</td></tr>
+<tr class="alt"><td>Singular value decomposition \n \c EIGEN_USE_LAPACKE </td><td>\code
+JacobiSVD<MatrixXd> svd;
+svd.compute(m1, ComputeThinV);
+\endcode</td><td>\code
+?gesvd
+\endcode</td></tr>
+<tr><td>Eigen-value decompositions \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
+EigenSolver<MatrixXd> es(m1);
+ComplexEigenSolver<MatrixXcd> ces(m1);
+SelfAdjointEigenSolver<MatrixXd> saes(m1+m1.transpose());
+GeneralizedSelfAdjointEigenSolver<MatrixXd>
+ gsaes(m1+m1.transpose(),m2+m2.transpose());
+\endcode</td><td>\code
+?gees
+?gees
+?syev/?heev
+?syev/?heev,
+?potrf
+\endcode</td></tr>
+<tr class="alt"><td>Schur decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
+RealSchur<MatrixXd> schurR(m1);
+ComplexSchur<MatrixXcd> schurC(m1);
+\endcode</td><td>\code
+?gees
+\endcode</td></tr>
+</table>
+In the examples, m1 and m2 are dense matrices and v1 and v2 are dense vectors.
+
+*/
+
+}
diff --git a/doc/UsingIntelMKL.dox b/doc/UsingIntelMKL.dox
index dbe559e53..a1a3a18f2 100644
--- a/doc/UsingIntelMKL.dox
+++ b/doc/UsingIntelMKL.dox
@@ -32,107 +32,45 @@
namespace Eigen {
-/** \page TopicUsingIntelMKL Using Intel® Math Kernel Library from Eigen
+/** \page TopicUsingIntelMKL Using Intel® MKL from %Eigen
-\section TopicUsingIntelMKL_Intro Eigen and Intel® Math Kernel Library (Intel® MKL)
+<!-- \section TopicUsingIntelMKL_Intro Eigen and Intel® Math Kernel Library (Intel® MKL) -->
+
+Since %Eigen version 3.1, users can benefit from built-in Intel® Math Kernel Library (MKL) optimizations with an installed copy of Intel MKL 10.3 (or later).
-Since Eigen version 3.1 and later, users can benefit from built-in Intel MKL optimizations with an installed copy of Intel MKL 10.3 (or later).
<a href="http://eigen.tuxfamily.org/Counter/redirect_to_mkl.php"> Intel MKL </a> provides highly optimized multi-threaded mathematical routines for x86-compatible architectures.
Intel MKL is available on Linux, Mac and Windows for both Intel64 and IA32 architectures.
\note
Intel® MKL is a proprietary software and it is the responsibility of users to buy or register for community (free) Intel MKL licenses for their products. Moreover, the license of the user product has to allow linking to proprietary software that excludes any unmodified versions of the GPL.
-Using Intel MKL through Eigen is easy:
--# define the \c EIGEN_USE_MKL_ALL macro before including any Eigen's header
+Using Intel MKL through %Eigen is easy:
+-# define the \c EIGEN_USE_MKL_ALL macro before including any %Eigen header
-# link your program to MKL libraries (see the <a href="http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor/">MKL linking advisor</a>)
-# on a 64bits system, you must use the LP64 interface (not the ILP64 one)
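+
+For instance, the first step simply amounts to (a minimal sketch):
+\code
+#define EIGEN_USE_MKL_ALL
+#include <Eigen/Dense>
+\endcode
+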
-When doing so, a number of Eigen's algorithms are silently substituted with calls to Intel MKL routines.
+When doing so, a number of %Eigen's algorithms are silently substituted with calls to Intel MKL routines.
These substitutions apply only for \b Dynamic \b or \b large enough objects with one of the following four standard scalar types: \c float, \c double, \c complex<float>, and \c complex<double>.
Operations on other scalar types or mixing reals and complexes will continue to use the built-in algorithms.
In addition you can choose which parts will be substituted by defining one or multiple of the following macros:
<table class="manual">
-<tr><td>\c EIGEN_USE_BLAS </td><td>Enables the use of external BLAS level 2 and 3 routines (compatible with any F77 BLAS interface, not only Intel MKL)</td></tr>
-<tr class="alt"><td>\c EIGEN_USE_LAPACKE </td><td>Enables the use of external Lapack routines via the <a href="http://www.netlib.org/lapack/lapacke.html">Intel Lapacke</a> C interface to Lapack (currently works with Intel MKL only)</td></tr>
-<tr><td>\c EIGEN_USE_LAPACKE_STRICT </td><td>Same as \c EIGEN_USE_LAPACKE but algorithm of lower robustness are disabled. This currently concerns only JacobiSVD which otherwise would be replaced by \c gesvd that is less robust than Jacobi rotations.</td></tr>
+<tr><td>\c EIGEN_USE_BLAS </td><td>Enables the use of external BLAS level 2 and 3 routines</td></tr>
+<tr class="alt"><td>\c EIGEN_USE_LAPACKE </td><td>Enables the use of external Lapack routines via the <a href="http://www.netlib.org/lapack/lapacke.html">Lapacke</a> C interface to Lapack</td></tr>
+<tr><td>\c EIGEN_USE_LAPACKE_STRICT </td><td>Same as \c EIGEN_USE_LAPACKE but algorithms of lower numerical robustness are disabled. \n This currently concerns only JacobiSVD which otherwise would be replaced by \c gesvd that is less robust than Jacobi rotations.</td></tr>
<tr class="alt"><td>\c EIGEN_USE_MKL_VML </td><td>Enables the use of Intel VML (vector operations)</td></tr>
<tr><td>\c EIGEN_USE_MKL_ALL </td><td>Defines \c EIGEN_USE_BLAS, \c EIGEN_USE_LAPACKE, and \c EIGEN_USE_MKL_VML </td></tr>
</table>
-Finally, the PARDISO sparse solver shipped with Intel MKL can be used through the \ref PardisoLU, \ref PardisoLLT and \ref PardisoLDLT classes of the \ref PardisoSupport_Module.
-
+Note that the BLAS and LAPACKE backends can be enabled for any F77 compatible BLAS and LAPACK libraries. See this \link TopicUsingBlasLapack page \endlink for the details.
-\section TopicUsingIntelMKL_SupportedFeatures List of supported features
+Finally, the PARDISO sparse solver shipped with Intel MKL can be used through the \ref PardisoLU, \ref PardisoLLT and \ref PardisoLDLT classes of the \ref PardisoSupport_Module.
-The breadth of Eigen functionality covered by Intel MKL is listed in the table below.
+The following table summarizes the list of functions covered by \c EIGEN_USE_MKL_VML:
<table class="manual">
-<tr><th>Functional domain</th><th>Code example</th><th>MKL routines</th></tr>
-<tr><td>Matrix-matrix operations \n \c EIGEN_USE_BLAS </td><td>\code
-m1*m2.transpose();
-m1.selfadjointView<Lower>()*m2;
-m1*m2.triangularView<Upper>();
-m1.selfadjointView<Lower>().rankUpdate(m2,1.0);
-\endcode</td><td>\code
-?gemm
-?symm/?hemm
-?trmm
-dsyrk/ssyrk
-\endcode</td></tr>
-<tr class="alt"><td>Matrix-vector operations \n \c EIGEN_USE_BLAS </td><td>\code
-m1.adjoint()*b;
-m1.selfadjointView<Lower>()*b;
-m1.triangularView<Upper>()*b;
-\endcode</td><td>\code
-?gemv
-?symv/?hemv
-?trmv
-\endcode</td></tr>
-<tr><td>LU decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
-v1 = m1.lu().solve(v2);
-\endcode</td><td>\code
-?getrf
-\endcode</td></tr>
-<tr class="alt"><td>Cholesky decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
-v1 = m2.selfadjointView<Upper>().llt().solve(v2);
-\endcode</td><td>\code
-?potrf
-\endcode</td></tr>
-<tr><td>QR decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
-m1.householderQr();
-m1.colPivHouseholderQr();
-\endcode</td><td>\code
-?geqrf
-?geqp3
-\endcode</td></tr>
-<tr class="alt"><td>Singular value decomposition \n \c EIGEN_USE_LAPACKE </td><td>\code
-JacobiSVD<MatrixXd> svd;
-svd.compute(m1, ComputeThinV);
-\endcode</td><td>\code
-?gesvd
-\endcode</td></tr>
-<tr><td>Eigen-value decompositions \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
-EigenSolver<MatrixXd> es(m1);
-ComplexEigenSolver<MatrixXcd> ces(m1);
-SelfAdjointEigenSolver<MatrixXd> saes(m1+m1.transpose());
-GeneralizedSelfAdjointEigenSolver<MatrixXd>
- gsaes(m1+m1.transpose(),m2+m2.transpose());
-\endcode</td><td>\code
-?gees
-?gees
-?syev/?heev
-?syev/?heev,
-?potrf
-\endcode</td></tr>
-<tr class="alt"><td>Schur decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
-RealSchur<MatrixXd> schurR(m1);
-ComplexSchur<MatrixXcd> schurC(m1);
-\endcode</td><td>\code
-?gees
-\endcode</td></tr>
-<tr><td>Vector Math \n \c EIGEN_USE_MKL_VML </td><td>\code
+<tr><th>Code example</th><th>MKL routines</th></tr>
+<tr><td>\code
v2=v1.array().sin();
v2=v1.array().asin();
v2=v1.array().cos();
@@ -156,7 +94,7 @@ v?Sqr
v?Powx
\endcode</td></tr>
</table>
-In the examples, m1 and m2 are dense matrices and v1 and v2 are dense vectors.
+In the examples, v1 and v2 are dense vectors.
\section TopicUsingIntelMKL_Links Links
diff --git a/doc/eigendoxy.css b/doc/eigendoxy.css
index 60243d870..6274e6c70 100644
--- a/doc/eigendoxy.css
+++ b/doc/eigendoxy.css
@@ -45,7 +45,7 @@ pre.fragment {
/* Common style for all Eigen's tables */
-table.example, table.manual, table.manual-vl {
+table.example, table.manual, table.manual-vl, table.manual-hl {
max-width:100%;
border-collapse: collapse;
border-style: solid;
@@ -58,7 +58,7 @@ table.example, table.manual, table.manual-vl {
-webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
}
-table.example th, table.manual th, table.manual-vl th {
+table.example th, table.manual th, table.manual-vl th, table.manual-hl th {
padding: 0.5em 0.5em 0.5em 0.5em;
text-align: left;
padding-right: 1em;
@@ -70,7 +70,7 @@ table.example th, table.manual th, table.manual-vl th {
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFFFFF', endColorstr='#F4F4E5');
}
-table.example td, table.manual td, table.manual-vl td {
+table.example td, table.manual td, table.manual-vl td, table.manual-hl td {
vertical-align:top;
border-width: 1px;
border-color: #cccccc;
@@ -108,15 +108,15 @@ table.example td {
/* standard class for the manual */
-table.manual, table.manual-vl {
+table.manual, table.manual-vl, table.manual-hl {
padding: 0.2em 0em 0.5em 0em;
}
-table.manual th, table.manual-vl th {
+table.manual th, table.manual-vl th, table.manual-hl th {
margin: 0em 0em 0.3em 0em;
}
-table.manual td, table.manual-vl td {
+table.manual td, table.manual-vl td, table.manual-hl td {
padding: 0.3em 0.5em 0.3em 0.5em;
vertical-align:top;
border-width: 1px;
@@ -136,6 +136,16 @@ table.manual-vl th.inter {
border-style: solid solid solid solid;
}
+table.manual-hl td {
+ border-color: #cccccc;
+ border-width: 1px;
+ border-style: solid none solid none;
+}
+
+table td.code {
+ font-family: monospace;
+}
+
h2 {
margin-top:2em;
border-style: none none solid none;
@@ -166,6 +176,11 @@ div.toc ul {
margin: 0.2em 0 0.4em 0.5em;
}
+span.cpp11,span.cpp14,span.cpp17 {
+ color: #119911;
+ font-weight: bold;
+}
+
/**** old Eigen's styles ****/
@@ -177,8 +192,8 @@ table.tutorial_code td {
/* Whenever doxygen meets a '\n' or a '<BR/>', it will put
- * the text containing the characted into a <p class="starttd">.
- * This little hack togehter with table.tutorial_code td.note
+ * the text containing the character into a <p class="starttd">.
+ * This little hack together with table.tutorial_code td.note
* aims at fixing this issue. */
table.tutorial_code td.note p.starttd {
margin: 0px;
diff --git a/doc/examples/CMakeLists.txt b/doc/examples/CMakeLists.txt
index 08cf8efd7..f7a19055f 100644
--- a/doc/examples/CMakeLists.txt
+++ b/doc/examples/CMakeLists.txt
@@ -14,3 +14,8 @@ foreach(example_src ${examples_SRCS})
)
add_dependencies(all_examples ${example})
endforeach(example_src)
+
+check_cxx_compiler_flag("-std=c++11" EIGEN_COMPILER_SUPPORT_CPP11)
+if(EIGEN_COMPILER_SUPPORT_CPP11)
+ei_add_target_property(nullary_indexing COMPILE_FLAGS "-std=c++11")
+endif() \ No newline at end of file
diff --git a/doc/examples/Cwise_erf.cpp b/doc/examples/Cwise_erf.cpp
new file mode 100644
index 000000000..e7cd2c1c0
--- /dev/null
+++ b/doc/examples/Cwise_erf.cpp
@@ -0,0 +1,9 @@
+#include <Eigen/Core>
+#include <unsupported/Eigen/SpecialFunctions>
+#include <iostream>
+using namespace Eigen;
+int main()
+{
+ Array4d v(-0.5,2,0,-7);
+ std::cout << v.erf() << std::endl;
+}
diff --git a/doc/examples/Cwise_erfc.cpp b/doc/examples/Cwise_erfc.cpp
new file mode 100644
index 000000000..d8bb04c30
--- /dev/null
+++ b/doc/examples/Cwise_erfc.cpp
@@ -0,0 +1,9 @@
+#include <Eigen/Core>
+#include <unsupported/Eigen/SpecialFunctions>
+#include <iostream>
+using namespace Eigen;
+int main()
+{
+ Array4d v(-0.5,2,0,-7);
+ std::cout << v.erfc() << std::endl;
+}
diff --git a/doc/examples/Cwise_lgamma.cpp b/doc/examples/Cwise_lgamma.cpp
new file mode 100644
index 000000000..f1c4f503e
--- /dev/null
+++ b/doc/examples/Cwise_lgamma.cpp
@@ -0,0 +1,9 @@
+#include <Eigen/Core>
+#include <unsupported/Eigen/SpecialFunctions>
+#include <iostream>
+using namespace Eigen;
+int main()
+{
+ Array4d v(0.5,10,0,-1);
+ std::cout << v.lgamma() << std::endl;
+} \ No newline at end of file
diff --git a/doc/examples/TutorialInplaceLU.cpp b/doc/examples/TutorialInplaceLU.cpp
new file mode 100644
index 000000000..cb9c59b60
--- /dev/null
+++ b/doc/examples/TutorialInplaceLU.cpp
@@ -0,0 +1,61 @@
+#include <iostream>
+struct init {
+ init() { std::cout << "[" << "init" << "]" << std::endl; }
+};
+init init_obj;
+// [init]
+#include <iostream>
+#include <Eigen/Dense>
+
+using namespace std;
+using namespace Eigen;
+
+int main()
+{
+ MatrixXd A(2,2);
+ A << 2, -1, 1, 3;
+ cout << "Here is the input matrix A before decomposition:\n" << A << endl;
+cout << "[init]" << endl;
+
+cout << "[declaration]" << endl;
+ PartialPivLU<Ref<MatrixXd> > lu(A);
+ cout << "Here is the input matrix A after decomposition:\n" << A << endl;
+cout << "[declaration]" << endl;
+
+cout << "[matrixLU]" << endl;
+ cout << "Here is the matrix storing the L and U factors:\n" << lu.matrixLU() << endl;
+cout << "[matrixLU]" << endl;
+
+cout << "[solve]" << endl;
+ MatrixXd A0(2,2); A0 << 2, -1, 1, 3;
+ VectorXd b(2); b << 1, 2;
+ VectorXd x = lu.solve(b);
+ cout << "Residual: " << (A0 * x - b).norm() << endl;
+cout << "[solve]" << endl;
+
+cout << "[modifyA]" << endl;
+ A << 3, 4, -2, 1;
+ x = lu.solve(b);
+ cout << "Residual: " << (A0 * x - b).norm() << endl;
+cout << "[modifyA]" << endl;
+
+cout << "[recompute]" << endl;
+ A0 = A; // save A
+ lu.compute(A);
+ x = lu.solve(b);
+ cout << "Residual: " << (A0 * x - b).norm() << endl;
+cout << "[recompute]" << endl;
+
+cout << "[recompute_bis0]" << endl;
+ MatrixXd A1(2,2);
+ A1 << 5,-2,3,4;
+ lu.compute(A1);
+ cout << "Here is the input matrix A1 after decomposition:\n" << A1 << endl;
+cout << "[recompute_bis0]" << endl;
+
+cout << "[recompute_bis1]" << endl;
+ x = lu.solve(b);
+ cout << "Residual: " << (A1 * x - b).norm() << endl;
+cout << "[recompute_bis1]" << endl;
+
+}
diff --git a/doc/examples/make_circulant2.cpp b/doc/examples/make_circulant2.cpp
new file mode 100644
index 000000000..95d3dd31a
--- /dev/null
+++ b/doc/examples/make_circulant2.cpp
@@ -0,0 +1,52 @@
+#include <Eigen/Core>
+#include <iostream>
+
+using namespace Eigen;
+
+// [circulant_func]
+template<class ArgType>
+class circulant_functor {
+ const ArgType &m_vec;
+public:
+ circulant_functor(const ArgType& arg) : m_vec(arg) {}
+
+ const typename ArgType::Scalar& operator() (Index row, Index col) const {
+ Index index = row - col;
+ if (index < 0) index += m_vec.size();
+ return m_vec(index);
+ }
+};
+// [circulant_func]
+
+// [square]
+template<class ArgType>
+struct circulant_helper {
+ typedef Matrix<typename ArgType::Scalar,
+ ArgType::SizeAtCompileTime,
+ ArgType::SizeAtCompileTime,
+ ColMajor,
+ ArgType::MaxSizeAtCompileTime,
+ ArgType::MaxSizeAtCompileTime> MatrixType;
+};
+// [square]
+
+// [makeCirculant]
+template <class ArgType>
+CwiseNullaryOp<circulant_functor<ArgType>, typename circulant_helper<ArgType>::MatrixType>
+makeCirculant(const Eigen::MatrixBase<ArgType>& arg)
+{
+ typedef typename circulant_helper<ArgType>::MatrixType MatrixType;
+ return MatrixType::NullaryExpr(arg.size(), arg.size(), circulant_functor<ArgType>(arg.derived()));
+}
+// [makeCirculant]
+
+// [main]
+int main()
+{
+ Eigen::VectorXd vec(4);
+ vec << 1, 2, 4, 8;
+ Eigen::MatrixXd mat;
+ mat = makeCirculant(vec);
+ std::cout << mat << std::endl;
+}
+// [main]
diff --git a/doc/examples/nullary_indexing.cpp b/doc/examples/nullary_indexing.cpp
new file mode 100644
index 000000000..e27c3585a
--- /dev/null
+++ b/doc/examples/nullary_indexing.cpp
@@ -0,0 +1,66 @@
+#include <Eigen/Core>
+#include <iostream>
+
+using namespace Eigen;
+
+// [functor]
+template<class ArgType, class RowIndexType, class ColIndexType>
+class indexing_functor {
+ const ArgType &m_arg;
+ const RowIndexType &m_rowIndices;
+ const ColIndexType &m_colIndices;
+public:
+ typedef Matrix<typename ArgType::Scalar,
+ RowIndexType::SizeAtCompileTime,
+ ColIndexType::SizeAtCompileTime,
+ ArgType::Flags&RowMajorBit?RowMajor:ColMajor,
+ RowIndexType::MaxSizeAtCompileTime,
+ ColIndexType::MaxSizeAtCompileTime> MatrixType;
+
+ indexing_functor(const ArgType& arg, const RowIndexType& row_indices, const ColIndexType& col_indices)
+ : m_arg(arg), m_rowIndices(row_indices), m_colIndices(col_indices)
+ {}
+
+ const typename ArgType::Scalar& operator() (Index row, Index col) const {
+ return m_arg(m_rowIndices[row], m_colIndices[col]);
+ }
+};
+// [functor]
+
+// [function]
+template <class ArgType, class RowIndexType, class ColIndexType>
+CwiseNullaryOp<indexing_functor<ArgType,RowIndexType,ColIndexType>, typename indexing_functor<ArgType,RowIndexType,ColIndexType>::MatrixType>
+indexing(const Eigen::MatrixBase<ArgType>& arg, const RowIndexType& row_indices, const ColIndexType& col_indices)
+{
+ typedef indexing_functor<ArgType,RowIndexType,ColIndexType> Func;
+ typedef typename Func::MatrixType MatrixType;
+ return MatrixType::NullaryExpr(row_indices.size(), col_indices.size(), Func(arg.derived(), row_indices, col_indices));
+}
+// [function]
+
+
+int main()
+{
+ std::cout << "[main1]\n";
+ Eigen::MatrixXi A = Eigen::MatrixXi::Random(4,4);
+ Array3i ri(1,2,1);
+ ArrayXi ci(6); ci << 3,2,1,0,0,2;
+ Eigen::MatrixXi B = indexing(A, ri, ci);
+ std::cout << "A =" << std::endl;
+ std::cout << A << std::endl << std::endl;
+ std::cout << "A([" << ri.transpose() << "], [" << ci.transpose() << "]) =" << std::endl;
+ std::cout << B << std::endl;
+ std::cout << "[main1]\n";
+
+ std::cout << "[main2]\n";
+ B = indexing(A, ri+1, ci);
+ std::cout << "A(ri+1,ci) =" << std::endl;
+ std::cout << B << std::endl << std::endl;
+#if __cplusplus >= 201103L
+ B = indexing(A, ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3));
+ std::cout << "A(ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3)) =" << std::endl;
+ std::cout << B << std::endl << std::endl;
+#endif
+ std::cout << "[main2]\n";
+}
+
diff --git a/doc/ftv2node.png b/doc/ftv2node.png
new file mode 100644
index 000000000..63c605bb4
--- /dev/null
+++ b/doc/ftv2node.png
Binary files differ
diff --git a/doc/ftv2pnode.png b/doc/ftv2pnode.png
new file mode 100644
index 000000000..c6ee22f93
--- /dev/null
+++ b/doc/ftv2pnode.png
Binary files differ
diff --git a/doc/snippets/CMakeLists.txt b/doc/snippets/CMakeLists.txt
index 1135900cf..1baf32fba 100644
--- a/doc/snippets/CMakeLists.txt
+++ b/doc/snippets/CMakeLists.txt
@@ -24,5 +24,3 @@ foreach(snippet_src ${snippets_SRCS})
set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src}
PROPERTIES OBJECT_DEPENDS ${snippet_src})
endforeach(snippet_src)
-
-ei_add_target_property(compile_tut_arithmetic_transpose_aliasing COMPILE_FLAGS -DEIGEN_NO_DEBUG)
diff --git a/doc/snippets/Cwise_erf.cpp b/doc/snippets/Cwise_erf.cpp
deleted file mode 100644
index 7f51c1b6a..000000000
--- a/doc/snippets/Cwise_erf.cpp
+++ /dev/null
@@ -1,2 +0,0 @@
-Array4d v(-0.5,2,0,-7);
-cout << v.erf() << endl;
diff --git a/doc/snippets/Cwise_erfc.cpp b/doc/snippets/Cwise_erfc.cpp
deleted file mode 100644
index f0453d4b1..000000000
--- a/doc/snippets/Cwise_erfc.cpp
+++ /dev/null
@@ -1,2 +0,0 @@
-Array4d v(-0.5,2,0,-7);
-cout << v.erfc() << endl;
diff --git a/doc/snippets/Cwise_lgamma.cpp b/doc/snippets/Cwise_lgamma.cpp
deleted file mode 100644
index cbc69b989..000000000
--- a/doc/snippets/Cwise_lgamma.cpp
+++ /dev/null
@@ -1,2 +0,0 @@
-Array4d v(0.5,10,0,-1);
-cout << v.lgamma() << endl; \ No newline at end of file
diff --git a/doc/snippets/SparseMatrix_coeffs.cpp b/doc/snippets/SparseMatrix_coeffs.cpp
new file mode 100644
index 000000000..f71a69b07
--- /dev/null
+++ b/doc/snippets/SparseMatrix_coeffs.cpp
@@ -0,0 +1,9 @@
+SparseMatrix<double> A(3,3);
+A.insert(1,2) = 0;
+A.insert(0,1) = 1;
+A.insert(2,0) = 2;
+A.makeCompressed();
+cout << "The matrix A is:" << endl << MatrixXd(A) << endl;
+cout << "it has " << A.nonZeros() << " stored non zero coefficients that are: " << A.coeffs().transpose() << endl;
+A.coeffs() += 10;
+cout << "After adding 10 to every stored non zero coefficient, the matrix A is:" << endl << MatrixXd(A) << endl;
diff --git a/doc/snippets/compile_snippet.cpp.in b/doc/snippets/compile_snippet.cpp.in
index fdae39bcf..d63f371a3 100644
--- a/doc/snippets/compile_snippet.cpp.in
+++ b/doc/snippets/compile_snippet.cpp.in
@@ -1,5 +1,8 @@
-#include <Eigen/Eigen>
+static bool eigen_did_assert = false;
+#define eigen_assert(X) if(!eigen_did_assert && !(X)){ std::cout << "### Assertion raised in " << __FILE__ << ":" << __LINE__ << ":\n" #X << "\n### The following would happen without assertions:\n"; eigen_did_assert = true;}
+
#include <iostream>
+#include <Eigen/Eigen>
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795
diff --git a/doc/special_examples/random_cpp11.cpp b/doc/special_examples/random_cpp11.cpp
index adc3c110c..33744c051 100644
--- a/doc/special_examples/random_cpp11.cpp
+++ b/doc/special_examples/random_cpp11.cpp
@@ -7,7 +7,7 @@ using namespace Eigen;
int main() {
std::default_random_engine generator;
std::poisson_distribution<int> distribution(4.1);
- auto poisson = [&] (Eigen::Index) {return distribution(generator);};
+ auto poisson = [&] () {return distribution(generator);};
RowVectorXi v = RowVectorXi::NullaryExpr(10, poisson );
std::cout << v << "\n";