Diffstat (limited to 'doc')
-rw-r--r--doc/A05_PortingFrom2To3.dox6
-rw-r--r--doc/AsciiQuickReference.txt113
-rw-r--r--doc/Doxyfile.in7
-rw-r--r--doc/Manual.dox2
-rw-r--r--doc/PreprocessorDirectives.dox4
-rw-r--r--doc/QuickReference.dox2
-rw-r--r--doc/SparseLinearSystems.dox20
-rw-r--r--doc/SparseQuickReference.dox62
-rw-r--r--doc/StructHavingEigenMembers.dox6
-rw-r--r--doc/TemplateKeyword.dox8
-rw-r--r--doc/TopicAliasing.dox30
-rw-r--r--doc/TopicLazyEvaluation.dox4
-rw-r--r--doc/TutorialArrayClass.dox2
-rw-r--r--doc/TutorialReductionsVisitorsBroadcasting.dox18
-rw-r--r--doc/TutorialReshapeSlicing.dox65
-rw-r--r--doc/TutorialSparse.dox31
-rw-r--r--doc/UnalignedArrayAssert.dox15
-rw-r--r--doc/UsingIntelMKL.dox4
-rw-r--r--doc/snippets/Cwise_erf.cpp2
-rw-r--r--doc/snippets/Cwise_erfc.cpp2
-rw-r--r--doc/snippets/Cwise_lgamma.cpp2
-rw-r--r--doc/snippets/Cwise_sign.cpp2
-rw-r--r--doc/snippets/MatrixBase_cwiseSign.cpp4
-rw-r--r--doc/snippets/TopicAliasing_mult4.cpp5
-rw-r--r--doc/snippets/TopicAliasing_mult5.cpp5
-rw-r--r--doc/snippets/Tutorial_AdvancedInitialization_Join.cpp2
-rw-r--r--doc/snippets/Tutorial_ReshapeMat2Mat.cpp6
-rw-r--r--doc/snippets/Tutorial_ReshapeMat2Vec.cpp11
-rw-r--r--doc/snippets/Tutorial_SlicingCol.cpp11
-rw-r--r--doc/snippets/Tutorial_SlicingVec.cpp4
30 files changed, 323 insertions, 132 deletions
diff --git a/doc/A05_PortingFrom2To3.dox b/doc/A05_PortingFrom2To3.dox
index 2d9182bbb..0dbddb976 100644
--- a/doc/A05_PortingFrom2To3.dox
+++ b/doc/A05_PortingFrom2To3.dox
@@ -9,8 +9,8 @@ and gives tips to help porting your application from Eigen2 to Eigen3.
\section CompatibilitySupport Eigen2 compatibility support
-Up to version 3.2 %Eigen provides <a href="http://eigen.tuxfamily.org/dox/Eigen2SupportModes.html">Eigen2 support modes</a>. These are removed now, because they were barely used anymore and became hard to maintain after internal re-designs.
-You can still use them by first <a href="http://eigen.tuxfamily.org/dox/Eigen2ToEigen3.html">porting your code to Eigen 3.2</a>.
+Up to version 3.2 %Eigen provides <a href="http://eigen.tuxfamily.org/dox-3.2/Eigen2SupportModes.html">Eigen2 support modes</a>. These have now been removed, because they were barely used and became hard to maintain after internal redesigns.
+You can still use them by first <a href="http://eigen.tuxfamily.org/dox-3.2/Eigen2ToEigen3.html">porting your code to Eigen 3.2</a>.
\section Using The USING_PART_OF_NAMESPACE_EIGEN macro
@@ -223,7 +223,7 @@ triangular part to work on</td></tr>
\section GeometryModule Changes in the Geometry module
-The Geometry module is the one that changed the most. If you rely heavily on it, it's probably a good idea to use the \ref Eigen2SupportModes "Eigen 2 support modes" to perform your migration.
+The Geometry module is the one that changed the most. If you rely heavily on it, it's probably a good idea to use the <a href="http://eigen.tuxfamily.org/dox-3.2/Eigen2SupportModes.html">Eigen 2 support modes</a> to perform your migration.
\section Transform The Transform class
diff --git a/doc/AsciiQuickReference.txt b/doc/AsciiQuickReference.txt
index b5bdfa1f4..9599df60b 100644
--- a/doc/AsciiQuickReference.txt
+++ b/doc/AsciiQuickReference.txt
@@ -32,17 +32,19 @@ A << 1, 2, 3, // Initialize A. The elements can also be
B << A, A, A; // B is three horizontally stacked A's.
A.fill(10); // Fill A with all 10's.
-// Eigen // Matlab
-MatrixXd::Identity(rows,cols) // eye(rows,cols)
-C.setIdentity(rows,cols) // C = eye(rows,cols)
-MatrixXd::Zero(rows,cols) // zeros(rows,cols)
-C.setZero(rows,cols) // C = ones(rows,cols)
-MatrixXd::Ones(rows,cols) // ones(rows,cols)
-C.setOnes(rows,cols) // C = ones(rows,cols)
-MatrixXd::Random(rows,cols) // rand(rows,cols)*2-1 // MatrixXd::Random returns uniform random numbers in (-1, 1).
-C.setRandom(rows,cols) // C = rand(rows,cols)*2-1
-VectorXd::LinSpaced(size,low,high) // linspace(low,high,size)'
-v.setLinSpaced(size,low,high) // v = linspace(low,high,size)'
+// Eigen // Matlab
+MatrixXd::Identity(rows,cols) // eye(rows,cols)
+C.setIdentity(rows,cols) // C = eye(rows,cols)
+MatrixXd::Zero(rows,cols) // zeros(rows,cols)
+C.setZero(rows,cols) // C = zeros(rows,cols)
+MatrixXd::Ones(rows,cols) // ones(rows,cols)
+C.setOnes(rows,cols) // C = ones(rows,cols)
+MatrixXd::Random(rows,cols) // rand(rows,cols)*2-1 // MatrixXd::Random returns uniform random numbers in (-1, 1).
+C.setRandom(rows,cols) // C = rand(rows,cols)*2-1
+VectorXd::LinSpaced(size,low,high) // linspace(low,high,size)'
+v.setLinSpaced(size,low,high) // v = linspace(low,high,size)'
+VectorXi::LinSpaced(((hi-low)/step)+1, // low:step:hi
+ low,low+step*(size-1)) // where size=((hi-low)/step)+1
// Matrix slicing and blocks. All expressions listed here are read/write.
@@ -85,13 +87,17 @@ P.bottomRightCorner<rows,cols>() // P(end-rows+1:end, end-cols+1:end)
R.row(i) = P.col(j); // R(i, :) = P(:, i)
R.col(j1).swap(mat1.col(j2)); // R(:, [j1 j2]) = R(:, [j2, j1])
-// Views, transpose, etc; all read-write except for .adjoint().
+// Views, transpose, etc.
// Eigen // Matlab
R.adjoint() // R'
-R.transpose() // R.' or conj(R')
-R.diagonal() // diag(R)
+R.transpose() // R.' or conj(R') // Read-write
+R.diagonal() // diag(R) // Read-write
x.asDiagonal() // diag(x)
-R.transpose().colwise().reverse(); // rot90(R)
+R.transpose().colwise().reverse() // rot90(R) // Read-write
+R.rowwise().reverse() // fliplr(R)
+R.colwise().reverse() // flipud(R)
+R.replicate(i,j) // repmat(R,i,j)
+
// All the same as Matlab, but matlab doesn't have *= style operators.
// Matrix-vector. Matrix-matrix. Matrix-scalar.
@@ -103,37 +109,40 @@ a *= M; R = P + Q; R = P/s;
R -= Q; R /= s;
// Vectorized operations on each element independently
-// Eigen // Matlab
-R = P.cwiseProduct(Q); // R = P .* Q
-R = P.array() * s.array();// R = P .* s
-R = P.cwiseQuotient(Q); // R = P ./ Q
-R = P.array() / Q.array();// R = P ./ Q
-R = P.array() + s.array();// R = P + s
-R = P.array() - s.array();// R = P - s
-R.array() += s; // R = R + s
-R.array() -= s; // R = R - s
-R.array() < Q.array(); // R < Q
-R.array() <= Q.array(); // R <= Q
-R.cwiseInverse(); // 1 ./ P
-R.array().inverse(); // 1 ./ P
-R.array().sin() // sin(P)
-R.array().cos() // cos(P)
-R.array().pow(s) // P .^ s
-R.array().square() // P .^ 2
-R.array().cube() // P .^ 3
-R.cwiseSqrt() // sqrt(P)
-R.array().sqrt() // sqrt(P)
-R.array().exp() // exp(P)
-R.array().log() // log(P)
-R.cwiseMax(P) // max(R, P)
-R.array().max(P.array()) // max(R, P)
-R.cwiseMin(P) // min(R, P)
-R.array().min(P.array()) // min(R, P)
-R.cwiseAbs() // abs(P)
-R.array().abs() // abs(P)
-R.cwiseAbs2() // abs(P.^2)
-R.array().abs2() // abs(P.^2)
-(R.array() < s).select(P,Q); // (R < s ? P : Q)
+// Eigen // Matlab
+R = P.cwiseProduct(Q); // R = P .* Q
+R = P.array() * s.array(); // R = P .* s
+R = P.cwiseQuotient(Q); // R = P ./ Q
+R = P.array() / Q.array(); // R = P ./ Q
+R = P.array() + s.array(); // R = P + s
+R = P.array() - s.array(); // R = P - s
+R.array() += s; // R = R + s
+R.array() -= s; // R = R - s
+R.array() < Q.array(); // R < Q
+R.array() <= Q.array(); // R <= Q
+R.cwiseInverse(); // 1 ./ P
+R.array().inverse(); // 1 ./ P
+R.array().sin() // sin(P)
+R.array().cos() // cos(P)
+R.array().pow(s) // P .^ s
+R.array().square() // P .^ 2
+R.array().cube() // P .^ 3
+R.cwiseSqrt() // sqrt(P)
+R.array().sqrt() // sqrt(P)
+R.array().exp() // exp(P)
+R.array().log() // log(P)
+R.cwiseMax(P) // max(R, P)
+R.array().max(P.array()) // max(R, P)
+R.cwiseMin(P) // min(R, P)
+R.array().min(P.array()) // min(R, P)
+R.cwiseAbs() // abs(P)
+R.array().abs() // abs(P)
+R.cwiseAbs2() // abs(P.^2)
+R.array().abs2() // abs(P.^2)
+(R.array() < s).select(P,Q); // (R < s ? P : Q)
+R = (Q.array()==0).select(P,A); // R(Q==0) = P(Q==0)
+R = P.unaryExpr(ptr_fun(func)); // R = arrayfun(func, P) // with: scalar func(const scalar &x);
+
// Reductions.
int r, c;
@@ -164,12 +173,12 @@ x.dot(y) // dot(x, y)
x.cross(y) // cross(x, y) Requires #include <Eigen/Geometry>
//// Type conversion
-// Eigen // Matlab
-A.cast<double>(); // double(A)
-A.cast<float>(); // single(A)
-A.cast<int>(); // int32(A)
-A.real(); // real(A)
-A.imag(); // imag(A)
+// Eigen // Matlab
+A.cast<double>(); // double(A)
+A.cast<float>(); // single(A)
+A.cast<int>(); // int32(A)
+A.real(); // real(A)
+A.imag(); // imag(A)
// if the original type equals destination type, no work is done
// Note that for most operations Eigen requires all operands to have the same type:
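For context, a minimal self-contained sketch exercising the quick-reference entries added above (the values and the clamp01 helper are illustrative):
\code
#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

double clamp01(const double& x) { return x < 0.0 ? 0.0 : (x > 1.0 ? 1.0 : x); }

int main()
{
  // low:step:hi analogue: 0, 5, 10, ..., 30
  int low = 0, step = 5, hi = 30;
  int size = ((hi - low) / step) + 1;
  VectorXi vi = VectorXi::LinSpaced(size, low, low + step * (size - 1));
  std::cout << vi.transpose() << "\n";

  MatrixXd R = MatrixXd::Random(3,3);
  MatrixXd P = MatrixXd::Ones(3,3), Q = MatrixXd::Zero(3,3);
  MatrixXd S = (R.array() < 0.5).select(P, Q); // (R < 0.5 ? P : Q)
  MatrixXd T = R.unaryExpr(&clamp01);          // arrayfun(clamp01, R)
  std::cout << S << "\n\n" << T << "\n";
}
\endcode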
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index e0c6a7e34..0a43c7c4e 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -224,7 +224,8 @@ ALIASES = "only_for_vectors=This is only for vectors (either row-
"note_about_checking_solutions=This method just tries to find as good a solution as possible. If you want to check whether a solution exists or if it is accurate, just call this function to get a result and then compute the error of this result, or use MatrixBase::isApprox() directly, for instance like this: \code bool a_solution_exists = (A*result).isApprox(b, precision); \endcode This method avoids dividing by zero, so that the non-existence of a solution doesn't by itself mean that you'll get \c inf or \c nan values." \
"note_try_to_help_rvo=This function returns the result by value. In order to make that efficient, it is implemented as just a return statement using a special constructor, hopefully allowing the compiler to perform a RVO (return value optimization)." \
"nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\"" \
- "implsparsesolverconcept=This class follows the \link TutorialSparseSolverConcept sparse solver concept \endlink."
+ "implsparsesolverconcept=This class follows the \link TutorialSparseSolverConcept sparse solver concept \endlink." \
+ "blank= "
ALIASES += "eigenAutoToc= "
@@ -273,7 +274,7 @@ OPTIMIZE_OUTPUT_VHDL = NO
# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
-EXTENSION_MAPPING =
+EXTENSION_MAPPING = .h=C++ no_extension=C++
# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
# comments according to the Markdown format, which allows for more readable
@@ -803,7 +804,7 @@ EXAMPLE_RECURSIVE = NO
# directories that contain image that are included in the documentation (see
# the \image command).
-IMAGE_PATH =
+IMAGE_PATH = ${Eigen_BINARY_DIR}/doc/html
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
diff --git a/doc/Manual.dox b/doc/Manual.dox
index c10c490a7..70aaa9a42 100644
--- a/doc/Manual.dox
+++ b/doc/Manual.dox
@@ -59,6 +59,8 @@ namespace Eigen {
\ingroup DenseMatrixManipulation_chapter */
/** \addtogroup TutorialMapClass
\ingroup DenseMatrixManipulation_chapter */
+/** \addtogroup TutorialReshapeSlicing
+ \ingroup DenseMatrixManipulation_chapter */
/** \addtogroup TopicAliasing
\ingroup DenseMatrixManipulation_chapter */
/** \addtogroup TopicStorageOrders
diff --git a/doc/PreprocessorDirectives.dox b/doc/PreprocessorDirectives.dox
index 76ce2eb99..14e84bc20 100644
--- a/doc/PreprocessorDirectives.dox
+++ b/doc/PreprocessorDirectives.dox
@@ -87,9 +87,6 @@ run time. However, these assertions do cost time and can thus be turned off.
- \b EIGEN_STACK_ALLOCATION_LIMIT - defines the maximum bytes for a buffer to be allocated on the stack. For internal
temporary buffers, dynamic memory allocation is employed as a fall back. For fixed-size matrices or arrays, exceeding
this threshold raises a compile time assertion. Use 0 to set no limit. Default is 128 KB.
- - \b EIGEN_HAS_POSIX_MEMALIGN - defines whether aligned memory allocation can be performed through the \c posix_memalign
- function. The availability of \c posix_memalign is automatically checked on most platform, but this option allows to
- by-pass %Eigen's built-in rules.
\section TopicPreprocessorDirectivesPlugins Plugins
@@ -106,6 +103,7 @@ following macros are supported; none of them are defined by default.
- \b EIGEN_MATRIX_PLUGIN - filename of plugin for extending the Matrix class.
- \b EIGEN_MATRIXBASE_PLUGIN - filename of plugin for extending the MatrixBase class.
- \b EIGEN_PLAINOBJECTBASE_PLUGIN - filename of plugin for extending the PlainObjectBase class.
+ - \b EIGEN_MAPBASE_PLUGIN - filename of plugin for extending the MapBase class.
- \b EIGEN_QUATERNION_PLUGIN - filename of plugin for extending the Quaternion class.
- \b EIGEN_QUATERNIONBASE_PLUGIN - filename of plugin for extending the QuaternionBase class.
- \b EIGEN_SPARSEMATRIX_PLUGIN - filename of plugin for extending the SparseMatrix class.
diff --git a/doc/QuickReference.dox b/doc/QuickReference.dox
index 62b39b201..e19c7e3a4 100644
--- a/doc/QuickReference.dox
+++ b/doc/QuickReference.dox
@@ -21,7 +21,7 @@ The Eigen library is divided in a Core module and several additional modules. Ea
<tr class="alt"><td>\link SVD_Module SVD \endlink</td><td>\code#include <Eigen/SVD>\endcode</td><td>SVD decompositions with least-squares solver (JacobiSVD, BDCSVD)</td></tr>
<tr ><td>\link QR_Module QR \endlink</td><td>\code#include <Eigen/QR>\endcode</td><td>QR decomposition with solver (HouseholderQR, ColPivHouseholderQR, FullPivHouseholderQR)</td></tr>
<tr class="alt"><td>\link Eigenvalues_Module Eigenvalues \endlink</td><td>\code#include <Eigen/Eigenvalues>\endcode</td><td>Eigenvalue, eigenvector decompositions (EigenSolver, SelfAdjointEigenSolver, ComplexEigenSolver)</td></tr>
-<tr ><td>\link Sparse_modules Sparse \endlink</td><td>\code#include <Eigen/Sparse>\endcode</td><td>%Sparse matrix storage and related basic linear algebra (SparseMatrix, SparseVector) \n (see \ref SparseQuickRefPage for details on sparse modules)</td></tr>
+<tr ><td>\link Sparse_Module Sparse \endlink</td><td>\code#include <Eigen/Sparse>\endcode</td><td>%Sparse matrix storage and related basic linear algebra (SparseMatrix, SparseVector) \n (see \ref SparseQuickRefPage for details on sparse modules)</td></tr>
<tr class="alt"><td></td><td>\code#include <Eigen/Dense>\endcode</td><td>Includes Core, Geometry, LU, Cholesky, SVD, QR, and Eigenvalues header files</td></tr>
<tr ><td></td><td>\code#include <Eigen/Eigen>\endcode</td><td>Includes %Dense and %Sparse header files (the whole Eigen library)</td></tr>
</table>
diff --git a/doc/SparseLinearSystems.dox b/doc/SparseLinearSystems.dox
index 9fb3282e7..ee4f53a4e 100644
--- a/doc/SparseLinearSystems.dox
+++ b/doc/SparseLinearSystems.dox
@@ -15,20 +15,20 @@ They are summarized in the following tables:
<tr><th>Class</th><th>Solver kind</th><th>Matrix kind</th><th>Features related to performance</th>
<th>License</th><th class="width20em"><p>Notes</p></th></tr>
-<tr><td>SimplicialLLT \n <tt>#include<Eigen/\link SparseCholesky_Module SparseCholesky\endlink></tt></td><td>Direct LLt factorization</td><td>SPD</td><td>Fill-in reducing</td>
+<tr><td>SimplicialLLT \n <tt>\#include<Eigen/\link SparseCholesky_Module SparseCholesky\endlink></tt></td><td>Direct LLt factorization</td><td>SPD</td><td>Fill-in reducing</td>
<td>LGPL</td>
<td>SimplicialLDLT is often preferable</td></tr>
-<tr><td>SimplicialLDLT \n <tt>#include<Eigen/\link SparseCholesky_Module SparseCholesky\endlink></tt></td><td>Direct LDLt factorization</td><td>SPD</td><td>Fill-in reducing</td>
+<tr><td>SimplicialLDLT \n <tt>\#include<Eigen/\link SparseCholesky_Module SparseCholesky\endlink></tt></td><td>Direct LDLt factorization</td><td>SPD</td><td>Fill-in reducing</td>
<td>LGPL</td>
<td>Recommended for very sparse and not too large problems (e.g., 2D Poisson eq.)</td></tr>
-<tr><td>SparseLU \n <tt>#include<Eigen/\link SparseLU_Module SparseLU\endlink></tt></td> <td>LU factorization </td>
+<tr><td>SparseLU \n <tt>\#include<Eigen/\link SparseLU_Module SparseLU\endlink></tt></td> <td>LU factorization </td>
<td>Square </td><td>Fill-in reducing, Leverage fast dense algebra</td>
<td>MPL2</td>
<td>optimized for small and large problems with irregular patterns </td></tr>
-<tr><td>SparseQR \n <tt>#include<Eigen/\link SparseQR_Module SparseQR\endlink></tt></td> <td> QR factorization</td>
+<tr><td>SparseQR \n <tt>\#include<Eigen/\link SparseQR_Module SparseQR\endlink></tt></td> <td> QR factorization</td>
<td>Any, rectangular</td><td> Fill-in reducing</td>
<td>MPL2</td>
<td>recommended for least-square problems, has a basic rank-revealing feature</td></tr>
@@ -40,17 +40,17 @@ They are summarized in the following tables:
<tr><th>Class</th><th>Solver kind</th><th>Matrix kind</th><th>Supported preconditioners, [default]</th>
<th>License</th><th class="width20em"><p>Notes</p></th></tr>
-<tr><td>ConjugateGradient \n <tt>#include<Eigen/\link IterativeLinearSolvers_Module IterativeLinearSolvers\endlink></tt></td> <td>Classic iterative CG</td><td>SPD</td>
+<tr><td>ConjugateGradient \n <tt>\#include<Eigen/\link IterativeLinearSolvers_Module IterativeLinearSolvers\endlink></tt></td> <td>Classic iterative CG</td><td>SPD</td>
<td>IdentityPreconditioner, [DiagonalPreconditioner], IncompleteCholesky</td>
<td>MPL2</td>
<td>Recommended for large symmetric problems (e.g., 3D Poisson eq.)</td></tr>
-<tr><td>LeastSquaresConjugateGradient \n <tt>#include<Eigen/\link IterativeLinearSolvers_Module IterativeLinearSolvers\endlink></tt></td><td>CG for rectangular least-square problem</td><td>Rectangular</td>
+<tr><td>LeastSquaresConjugateGradient \n <tt>\#include<Eigen/\link IterativeLinearSolvers_Module IterativeLinearSolvers\endlink></tt></td><td>CG for rectangular least-square problem</td><td>Rectangular</td>
<td>IdentityPreconditioner, [LeastSquareDiagonalPreconditioner]</td>
<td>MPL2</td>
<td>Solve for min |A'Ax-b|^2 without forming A'A</td></tr>
-<tr><td>BiCGSTAB \n <tt>#include<Eigen/\link IterativeLinearSolvers_Module IterativeLinearSolvers\endlink></tt></td><td>Iterative stabilized bi-conjugate gradient</td><td>Square</td>
+<tr><td>BiCGSTAB \n <tt>\#include<Eigen/\link IterativeLinearSolvers_Module IterativeLinearSolvers\endlink></tt></td><td>Iterative stabilized bi-conjugate gradient</td><td>Square</td>
<td>IdentityPreconditioner, [DiagonalPreconditioner], IncompleteLUT</td>
<td>MPL2</td>
<td>To speedup the convergence, try it with the \ref IncompleteLUT preconditioner.</td></tr>
@@ -65,17 +65,17 @@ They are summarized in the following tables:
<td>Requires the <a href="http://pastix.gforge.inria.fr">PaStiX</a> package, \b CeCILL-C </td>
<td>optimized for tough problems and symmetric patterns</td></tr>
<tr><td>CholmodSupernodalLLT</td><td>\link CholmodSupport_Module CholmodSupport \endlink</td><td>Direct LLt factorization</td><td>SPD</td><td>Fill-in reducing, Leverage fast dense algebra</td>
- <td>Requires the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package, \b GPL </td>
+ <td>Requires the <a href="http://www.suitesparse.com">SuiteSparse</a> package, \b GPL </td>
<td></td></tr>
<tr><td>UmfPackLU</td><td>\link UmfPackSupport_Module UmfPackSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
- <td>Requires the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package, \b GPL </td>
+ <td>Requires the <a href="http://www.suitesparse.com">SuiteSparse</a> package, \b GPL </td>
<td></td></tr>
<tr><td>SuperLU</td><td>\link SuperLUSupport_Module SuperLUSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
<td>Requires the <a href="http://crd-legacy.lbl.gov/~xiaoye/SuperLU/">SuperLU</a> library, (BSD-like)</td>
<td></td></tr>
<tr><td>SPQR</td><td>\link SPQRSupport_Module SPQRSupport \endlink </td> <td> QR factorization </td>
<td> Any, rectangular</td><td>fill-in reducing, multithreaded, fast dense algebra</td>
- <td> requires the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package, \b GPL </td><td>recommended for linear least-squares problems, has a rank-revealing feature</tr>
+ <td> requires the <a href="http://www.suitesparse.com">SuiteSparse</a> package, \b GPL </td><td>recommended for linear least-squares problems, has a rank-revealing feature</tr>
</table>
Here \c SPD means symmetric positive definite.
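All of the solvers above follow the same compute()/solve()/info() workflow. A minimal sketch using SimplicialLDLT (the matrix values are illustrative and assumed SPD):
\code
#include <Eigen/Sparse>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  SpMat A(3,3);
  A.insert(0,0) = 4; A.insert(1,1) = 4; A.insert(2,2) = 4;
  A.insert(0,1) = 1; A.insert(1,0) = 1; // keep A symmetric
  A.makeCompressed();

  Eigen::VectorXd b = Eigen::VectorXd::Ones(3);
  Eigen::SimplicialLDLT<SpMat> solver;
  solver.compute(A);
  if(solver.info() != Eigen::Success) return 1; // factorization failed
  Eigen::VectorXd x = solver.solve(b);
  if(solver.info() != Eigen::Success) return 1; // solving failed
  std::cout << x.transpose() << std::endl;
}
\endcode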
diff --git a/doc/SparseQuickReference.dox b/doc/SparseQuickReference.dox
index d04ac35c5..e0a30edcc 100644
--- a/doc/SparseQuickReference.dox
+++ b/doc/SparseQuickReference.dox
@@ -21,7 +21,7 @@ i.e either row major or column major. The default is column major. Most arithmet
<td> Resize/Reserve</td>
<td>
\code
- sm1.resize(m,n); //Change sm1 to a m x n matrix.
+ sm1.resize(m,n); // Change sm1 to an m x n matrix.
sm1.reserve(nnz); // Allocate room for nnz nonzeros elements.
\endcode
</td>
@@ -151,10 +151,10 @@ It is easy to perform arithmetic operations on sparse matrices provided that the
<td> Permutation </td>
<td>
\code
-perm.indices(); // Reference to the vector of indices
+perm.indices(); // Reference to the vector of indices
sm1.twistedBy(perm); // Permute rows and columns
-sm2 = sm1 * perm; //Permute the columns
-sm2 = perm * sm1; // Permute the columns
+sm2 = sm1 * perm; // Permute the columns
+sm2 = perm * sm1; // Permute the columns
\endcode
</td>
<td>
@@ -181,9 +181,9 @@ sm2 = perm * sm1; // Permute the columns
\section sparseotherops Other supported operations
<table class="manual">
-<tr><th>Operations</th> <th> Code </th> <th> Notes</th> </tr>
+<tr><th style="min-width:initial"> Code </th> <th> Notes</th> </tr>
+<tr><td colspan="2">Sub-matrices</td></tr>
<tr>
-<td>Sub-matrices</td>
<td>
\code
sm1.block(startRow, startCol, rows, cols);
@@ -193,25 +193,31 @@ sm2 = perm * sm1; // Permute the columns
sm1.bottomLeftCorner( rows, cols);
sm1.bottomRightCorner( rows, cols);
\endcode
-</td> <td> </td>
+</td><td>
+Contrary to dense matrices, here <strong>all these methods are read-only</strong>.\n
+See \ref TutorialSparse_SubMatrices and below for read-write sub-matrices.
+</td>
</tr>
-<tr>
-<td> Range </td>
+<tr class="alt"><td colspan="2"> Range </td></tr>
+<tr class="alt">
<td>
\code
- sm1.innerVector(outer);
- sm1.innerVectors(start, size);
- sm1.leftCols(size);
- sm2.rightCols(size);
- sm1.middleRows(start, numRows);
- sm1.middleCols(start, numCols);
- sm1.col(j);
+ sm1.innerVector(outer); // RW
+ sm1.innerVectors(start, size); // RW
+ sm1.leftCols(size); // RW
+ sm2.rightCols(size); // RO because sm2 is row-major
+ sm1.middleRows(start, numRows); // RO because sm1 is column-major
+ sm1.middleCols(start, numCols); // RW
+ sm1.col(j); // RW
\endcode
</td>
-<td>A inner vector is either a row (for row-major) or a column (for column-major). As stated earlier, the evaluation can be done in a matrix with different storage order </td>
+<td>
+An inner vector is either a row (for row-major) or a column (for column-major).\n
+As stated earlier, for a read-write sub-matrix (RW), the evaluation can be done in a matrix with different storage order.
+</td>
</tr>
+<tr><td colspan="2"> Triangular and selfadjoint views</td></tr>
<tr>
-<td> Triangular and selfadjoint views</td>
<td>
\code
sm2 = sm1.triangularview<Lower>();
@@ -222,26 +228,30 @@ sm2 = perm * sm1; // Permute the columns
\code
\endcode </td>
</tr>
-<tr>
-<td>Triangular solve </td>
+<tr class="alt"><td colspan="2">Triangular solve </td></tr>
+<tr class="alt">
<td>
\code
dv2 = sm1.triangularView<Upper>().solve(dv1);
- dv2 = sm1.topLeftCorner(size, size).triangularView<Lower>().solve(dv1);
+ dv2 = sm1.topLeftCorner(size, size)
+ .triangularView<Lower>().solve(dv1);
\endcode
</td>
<td> For general sparse solve, Use any suitable module described at \ref TopicSparseSystems </td>
</tr>
+<tr><td colspan="2"> Low-level API</td></tr>
<tr>
-<td> Low-level API</td>
<td>
\code
-sm1.valuePtr(); // Pointer to the values
-sm1.innerIndextr(); // Pointer to the indices.
-sm1.outerIndexPtr(); //Pointer to the beginning of each inner vector
+sm1.valuePtr(); // Pointer to the values
+sm1.innerIndexPtr(); // Pointer to the indices.
+sm1.outerIndexPtr(); // Pointer to the beginning of each inner vector
\endcode
</td>
-<td> If the matrix is not in compressed form, makeCompressed() should be called before. Note that these functions are mostly provided for interoperability purposes with external libraries. A better access to the values of the matrix is done by using the InnerIterator class as described in \link TutorialSparse the Tutorial Sparse \endlink section</td>
+<td>
+If the matrix is not in compressed form, makeCompressed() should be called first.\n
+Note that these functions are mostly provided for interoperability purposes with external libraries.\n
+A better way to access the values of the matrix is to use the InnerIterator class as described in \link TutorialSparse the Tutorial Sparse \endlink section</td>
</tr>
</table>
*/
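For illustration, a small sketch contrasting the raw-pointer API with the recommended InnerIterator traversal (sizes and values are illustrative; the default StorageIndex is assumed to be int):
\code
#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> sm1(4,4); // column-major by default
  sm1.insert(0,0) = 1; sm1.insert(2,1) = 2; sm1.insert(3,3) = 3;
  sm1.makeCompressed(); // required before using the raw pointers

  const double* values = sm1.valuePtr();
  const int*    inner  = sm1.innerIndexPtr();
  const int*    outer  = sm1.outerIndexPtr();
  for(int j = 0; j < sm1.outerSize(); ++j)
    for(int p = outer[j]; p < outer[j+1]; ++p)
      std::cout << "(" << inner[p] << "," << j << ") = " << values[p] << "\n";

  // Equivalent, and preferred, storage-agnostic traversal:
  for(int j = 0; j < sm1.outerSize(); ++j)
    for(Eigen::SparseMatrix<double>::InnerIterator it(sm1,j); it; ++it)
      std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
}
\endcode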
diff --git a/doc/StructHavingEigenMembers.dox b/doc/StructHavingEigenMembers.dox
index bd4fa7599..7fbed0eb0 100644
--- a/doc/StructHavingEigenMembers.dox
+++ b/doc/StructHavingEigenMembers.dox
@@ -6,7 +6,7 @@ namespace Eigen {
\section StructHavingEigenMembers_summary Executive Summary
-If you define a structure having members of \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you must overload its "operator new" so that it generates 16-bytes-aligned pointers. Fortunately, Eigen provides you with a macro EIGEN_MAKE_ALIGNED_OPERATOR_NEW that does that for you.
+If you define a structure having members of \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you must overload its "operator new" so that it generates 16-byte-aligned pointers. Fortunately, %Eigen provides you with a macro EIGEN_MAKE_ALIGNED_OPERATOR_NEW that does that for you.
\section StructHavingEigenMembers_what What kind of code needs to be changed?
@@ -48,7 +48,7 @@ Foo *foo = new Foo;
This macro makes "new Foo" always return an aligned pointer.
-If this approach is too intrusive, see also the \ref othersolutions.
+If this approach is too intrusive, see also the \ref StructHavingEigenMembers_othersolutions "other solutions".
\section StructHavingEigenMembers_why Why is this needed?
@@ -67,7 +67,7 @@ class Foo
Foo *foo = new Foo;
\endcode
-A Eigen::Vector2d consists of 2 doubles, which is 128 bits. Which is exactly the size of a SSE packet, which makes it possible to use SSE for all sorts of operations on this vector. But SSE instructions (at least the ones that Eigen uses, which are the fast ones) require 128-bit alignment. Otherwise you get a segmentation fault.
+An Eigen::Vector2d consists of 2 doubles, which is 128 bits: exactly the size of an SSE packet, which makes it possible to use SSE for all sorts of operations on this vector. But SSE instructions (at least the ones that %Eigen uses, which are the fast ones) require 128-bit alignment. Otherwise you get a segmentation fault.
For this reason, Eigen takes care by itself to require 128-bit alignment for Eigen::Vector2d, by doing two things:
\li Eigen requires 128-bit alignment for the Eigen::Vector2d's array (of 2 doubles). With GCC, this is done with a __attribute__ ((aligned(16))).
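A minimal sketch of the documented fix, following the page's own Foo example:
\code
#include <Eigen/Dense>

struct Foo
{
  Eigen::Vector2d v; // fixed-size vectorizable member
  double x;
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW // makes "new Foo" return 16-byte-aligned pointers
};

int main()
{
  Foo *foo = new Foo; // now guaranteed to be aligned
  foo->v.setZero();
  delete foo;
}
\endcode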
diff --git a/doc/TemplateKeyword.dox b/doc/TemplateKeyword.dox
index e06aba7ba..b84cfdae9 100644
--- a/doc/TemplateKeyword.dox
+++ b/doc/TemplateKeyword.dox
@@ -73,13 +73,13 @@ for operator<".
The reason that the \c template keyword is necessary in the last example has to do with the rules for how
templates are supposed to be compiled in C++. The compiler has to check the code for correct syntax at the
point where the template is defined, without knowing the actual value of the template arguments (\c Derived1
-and \c Derived2 in the example). That means that the compiler cannot know that <tt>dst.triangularPart</tt> is
+and \c Derived2 in the example). That means that the compiler cannot know that <tt>dst.triangularView</tt> is
a member template and that the following &lt; symbol is part of the delimiter for the template
-parameter. Another possibility would be that <tt>dst.triangularPart</tt> is a member variable with the &lt;
+parameter. Another possibility would be that <tt>dst.triangularView</tt> is a member variable with the &lt;
symbol refering to the <tt>operator&lt;()</tt> function. In fact, the compiler should choose the second
-possibility, according to the standard. If <tt>dst.triangularPart</tt> is a member template (as in our case),
+possibility, according to the standard. If <tt>dst.triangularView</tt> is a member template (as in our case),
the programmer should specify this explicitly with the \c template keyword and write <tt>dst.template
-triangularPart</tt>.
+triangularView</tt>.
The precise rules are rather complicated, but ignoring some subtleties we can summarize them as follows:
- A <em>dependent name</em> is name that depends (directly or indirectly) on a template parameter. In the
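A compilable sketch of the rule just described, based on the copyUpperTriangularPart example used in this page:
\code
#include <Eigen/Dense>

template <typename Derived1, typename Derived2>
void copyUpperTriangularPart(Eigen::MatrixBase<Derived1>& dst,
                             const Eigen::MatrixBase<Derived2>& src)
{
  // Writing "dst.triangularView<Eigen::Upper>()" here would not parse:
  // the compiler cannot know that triangularView is a member template.
  dst.template triangularView<Eigen::Upper>() = src;
}

int main()
{
  Eigen::Matrix3d a = Eigen::Matrix3d::Zero(), b = Eigen::Matrix3d::Ones();
  copyUpperTriangularPart(a, b); // copies only the upper triangular part of b
}
\endcode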
diff --git a/doc/TopicAliasing.dox b/doc/TopicAliasing.dox
index c2654aed2..a8f164428 100644
--- a/doc/TopicAliasing.dox
+++ b/doc/TopicAliasing.dox
@@ -153,10 +153,11 @@ not necessary to evaluate the right-hand side explicitly.
\section TopicAliasingMatrixMult Aliasing and matrix multiplication
-Matrix multiplication is the only operation in %Eigen that assumes aliasing by default. Thus, if \c matA is a
-matrix, then the statement <tt>matA = matA * matA;</tt> is safe. All other operations in %Eigen assume that
-there are no aliasing problems, either because the result is assigned to a different matrix or because it is a
-component-wise operation.
+Matrix multiplication is the only operation in %Eigen that assumes aliasing by default, <strong>under the
+condition that the destination matrix is not resized</strong>.
+Thus, if \c matA is a \b square matrix, then the statement <tt>matA = matA * matA;</tt> is safe.
+All other operations in %Eigen assume that there are no aliasing problems,
+either because the result is assigned to a different matrix or because it is a component-wise operation.
<table class="example">
<tr><th>Example</th><th>Output</th></tr>
@@ -198,6 +199,27 @@ may get wrong results:
\verbinclude TopicAliasing_mult3.out
</td></tr></table>
+Moreover, starting in Eigen 3.3, aliasing is \b not assumed if the destination matrix is resized and the product is not directly assigned to the destination.
+Therefore, the following example is also wrong:
+
+<table class="example">
+<tr><th>Example</th><th>Output</th></tr>
+<tr><td>
+\include TopicAliasing_mult4.cpp
+</td>
+<td>
+\verbinclude TopicAliasing_mult4.out
+</td></tr></table>
+
+As for any aliasing issue, you can resolve it by explicitly evaluating the expression prior to assignment:
+<table class="example">
+<tr><th>Example</th><th>Output</th></tr>
+<tr><td>
+\include TopicAliasing_mult5.cpp
+</td>
+<td>
+\verbinclude TopicAliasing_mult5.out
+</td></tr></table>
\section TopicAliasingSummary Summary
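For reference, the new mult4/mult5 snippets boil down to the following self-contained sketch (assuming Eigen 3.3 semantics):
\code
#include <Eigen/Dense>
#include <iostream>
using Eigen::MatrixXf;

int main()
{
  MatrixXf A(2,2), B(3,2);
  B << 2, 0, 0, 3, 1, 1;
  A << 2, 0, 0, -2;
  // A = (B * A).cwiseAbs();      // wrong: A is resized to 3x2 while still being read
  A = (B * A).eval().cwiseAbs();  // correct: evaluate the product into a temporary first
  std::cout << A << std::endl;
}
\endcode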
diff --git a/doc/TopicLazyEvaluation.dox b/doc/TopicLazyEvaluation.dox
index 393bc41d8..101ef8c72 100644
--- a/doc/TopicLazyEvaluation.dox
+++ b/doc/TopicLazyEvaluation.dox
@@ -36,7 +36,7 @@ Here is now a more involved example:
Eigen chooses lazy evaluation at every stage in that example, which is clearly the correct choice. In fact, lazy evaluation is the "default choice" and Eigen will choose it except in a few circumstances.
-<b>The first circumstance</b> in which Eigen chooses immediate evaluation, is when it sees an assignment <tt>a = b;</tt> and the expression \c b has the evaluate-before-assigning \link flags flag\endlink. The most important example of such an expression is the \link GeneralProduct matrix product expression\endlink. For example, when you do
+<b>The first circumstance</b> in which Eigen chooses immediate evaluation, is when it sees an assignment <tt>a = b;</tt> and the expression \c b has the evaluate-before-assigning \link flags flag\endlink. The most important example of such an expression is the \link Product matrix product expression\endlink. For example, when you do
\code matrix = matrix * matrix; \endcode
@@ -48,7 +48,7 @@ What if you know that the result does no alias the operand of the product and wa
Here, since we know that matrix2 is not the same matrix as matrix1, we know that lazy evaluation is not dangerous, so we may force lazy evaluation. Concretely, the effect of noalias() here is to bypass the evaluate-before-assigning \link flags flag\endlink.
-<b>The second circumstance</b> in which Eigen chooses immediate evaluation, is when it sees a nested expression such as <tt>a + b</tt> where \c b is already an expression having the evaluate-before-nesting \link flags flag\endlink. Again, the most important example of such an expression is the \link GeneralProduct matrix product expression\endlink. For example, when you do
+<b>The second circumstance</b> in which Eigen chooses immediate evaluation, is when it sees a nested expression such as <tt>a + b</tt> where \c b is already an expression having the evaluate-before-nesting \link flags flag\endlink. Again, the most important example of such an expression is the \link Product matrix product expression\endlink. For example, when you do
\code matrix1 = matrix2 + matrix3 * matrix4; \endcode
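A minimal sketch of the noalias() escape hatch mentioned above, assuming matrix2 is known not to alias matrix1:
\code
#include <Eigen/Dense>
using Eigen::MatrixXd;

int main()
{
  MatrixXd matrix1 = MatrixXd::Random(4,4);
  MatrixXd matrix2(4,4);
  matrix2.noalias() = matrix1 * matrix1; // lazy evaluation forced: no temporary is created
}
\endcode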
diff --git a/doc/TutorialArrayClass.dox b/doc/TutorialArrayClass.dox
index 6432684aa..f6f351091 100644
--- a/doc/TutorialArrayClass.dox
+++ b/doc/TutorialArrayClass.dox
@@ -157,7 +157,7 @@ The following example shows how to use array operations on a Matrix object by em
* to multiply them coefficient-wise and assigns the result to the matrix variable \c result (this is legal
because Eigen allows assigning array expressions to matrix variables).
-As a matter of fact, this usage case is so common that Eigen provides a \link MatrixBase::cwiseProduct() const
+As a matter of fact, this usage case is so common that Eigen provides a \link MatrixBase::cwiseProduct const
.cwiseProduct(.) \endlink method for matrices to compute the coefficient-wise product. This is also shown in
the example program.
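A tiny sketch of the cwiseProduct shorthand discussed above:
\code
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d a, b;
  a << 1, 2, 3, 4;
  b << 5, 6, 7, 8;
  // Same result as (a.array() * b.array()).matrix():
  Eigen::Matrix2d result = a.cwiseProduct(b);
  std::cout << result << std::endl;
}
\endcode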
diff --git a/doc/TutorialReductionsVisitorsBroadcasting.dox b/doc/TutorialReductionsVisitorsBroadcasting.dox
index 908a1b4b2..f5322b4a6 100644
--- a/doc/TutorialReductionsVisitorsBroadcasting.dox
+++ b/doc/TutorialReductionsVisitorsBroadcasting.dox
@@ -32,7 +32,7 @@ Eigen also provides the \link MatrixBase::norm() norm() \endlink method, which r
These operations can also operate on matrices; in that case, a n-by-p matrix is seen as a vector of size (n*p), so for example the \link MatrixBase::norm() norm() \endlink method returns the "Frobenius" or "Hilbert-Schmidt" norm. We refrain from speaking of the \f$\ell^2\f$ norm of a matrix because that can mean different things.
-If you want other coefficient-wise \f$\ell^p\f$ norms, use the \link MatrixBase::lpNorm() lpNorm<p>() \endlink method. The template parameter \a p can take the special value \a Infinity if you want the \f$\ell^\infty\f$ norm, which is the maximum of the absolute values of the coefficients.
+If you want other coefficient-wise \f$\ell^p\f$ norms, use the \link MatrixBase::lpNorm lpNorm<p>() \endlink method. The template parameter \a p can take the special value \a Infinity if you want the \f$\ell^\infty\f$ norm, which is the maximum of the absolute values of the coefficients.
The following example demonstrates these methods.
@@ -90,7 +90,7 @@ Array.
The arguments passed to a visitor are pointers to the variables where the
row and column position are to be stored. These variables should be of type
-\link DenseBase::Index Index \endlink, as shown below:
+\link Eigen::Index Index \endlink, as shown below:
<table class="example">
<tr><th>Example:</th><th>Output:</th></tr>
@@ -101,17 +101,16 @@ row and column position are to be stored. These variables should be of type
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_visitors.out
</td></tr></table>
-Note that both functions also return the value of the minimum or maximum coefficient if needed,
-as if it was a typical reduction operation.
+Both functions also return the value of the minimum or maximum coefficient.
\section TutorialReductionsVisitorsBroadcastingPartialReductions Partial reductions
Partial reductions are reductions that can operate column- or row-wise on a Matrix or
Array, applying the reduction operation on each column or row and
-returning a column or row-vector with the corresponding values. Partial reductions are applied
+returning a column or row vector with the corresponding values. Partial reductions are applied
with \link DenseBase::colwise() colwise() \endlink or \link DenseBase::rowwise() rowwise() \endlink.
A simple example is obtaining the maximum of the elements
-in each column in a given matrix, storing the result in a row-vector:
+in each column in a given matrix, storing the result in a row vector:
<table class="example">
<tr><th>Example:</th><th>Output:</th></tr>
@@ -133,8 +132,7 @@ The same operation can be performed row-wise:
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_rowwise.out
</td></tr></table>
-<b>Note that column-wise operations return a 'row-vector' while row-wise operations
-return a 'column-vector'</b>
+<b>Note that column-wise operations return a row vector, while row-wise operations return a column vector.</b>
\subsection TutorialReductionsVisitorsBroadcastingPartialReductionsCombined Combining partial reductions with other operations
It is also possible to use the result of a partial reduction to do further processing.
@@ -176,7 +174,7 @@ The concept behind broadcasting is similar to partial reductions, with the diffe
constructs an expression where a vector (column or row) is interpreted as a matrix by replicating it in
one direction.
-A simple example is to add a certain column-vector to each column in a matrix.
+A simple example is to add a certain column vector to each column in a matrix.
This can be accomplished with:
<table class="example">
@@ -253,7 +251,7 @@ is a new matrix whose size is the same as matrix <tt>m</tt>: \f[
\f]
- <tt>(m.colwise() - v).colwise().squaredNorm()</tt> is a partial reduction, computing the squared norm column-wise. The result of
-this operation is a row-vector where each coefficient is the squared Euclidean distance between each column in <tt>m</tt> and <tt>v</tt>: \f[
+this operation is a row vector where each coefficient is the squared Euclidean distance between each column in <tt>m</tt> and <tt>v</tt>: \f[
\mbox{(m.colwise() - v).colwise().squaredNorm()} =
\begin{bmatrix}
1 & 505 & 32 & 50
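This is the nearest-neighbour computation the paragraph above walks through; as a self-contained sketch (using the same values):
\code
#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main()
{
  MatrixXf m(2,4);
  m << 1, 23, 6, 9,
       3, 11, 7, 2;
  VectorXf v(2);
  v << 2, 3;

  MatrixXf::Index index;
  // squared distances are 1, 505, 32, 50, so column 0 is the nearest
  (m.colwise() - v).colwise().squaredNorm().minCoeff(&index);
  std::cout << "Nearest neighbour is column " << index << ":\n" << m.col(index) << std::endl;
}
\endcode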
diff --git a/doc/TutorialReshapeSlicing.dox b/doc/TutorialReshapeSlicing.dox
new file mode 100644
index 000000000..3730a5de6
--- /dev/null
+++ b/doc/TutorialReshapeSlicing.dox
@@ -0,0 +1,65 @@
+namespace Eigen {
+
+/** \eigenManualPage TutorialReshapeSlicing Reshape and Slicing
+
+%Eigen does not expose convenient methods to take slices or to reshape a matrix yet.
+Nonetheless, such features can easily be emulated using the Map class.
+
+\eigenAutoToc
+
+\section TutorialReshape Reshape
+
+A reshape operation consists in modifying the sizes of a matrix while keeping the same coefficients.
+Instead of modifying the input matrix itself, which is not possible for compile-time sizes, the approach consists in creating a different \em view on the storage using class Map.
+Here is a typical example creating a 1D linear view of a matrix:
+
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Tutorial_ReshapeMat2Vec.cpp
+</td>
+<td>
+\verbinclude Tutorial_ReshapeMat2Vec.out
+</td></tr></table>
+
+Note how the storage order of the input matrix modifies the order of the coefficients in the linear view.
+Here is another example reshaping a 2x6 matrix to a 6x2 one:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Tutorial_ReshapeMat2Mat.cpp
+</td>
+<td>
+\verbinclude Tutorial_ReshapeMat2Mat.out
+</td></tr></table>
+
+
+
+\section TutorialSlicing Slicing
+
+Slicing consists in taking a set of rows, columns, or elements, uniformly spaced within a matrix.
+Again, the class Map makes it easy to mimic this feature.
+
+For instance, one can reference every P-th element in a vector:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Tutorial_SlicingVec.cpp
+</td>
+<td>
+\verbinclude Tutorial_SlicingVec.out
+</td></tr></table>
+
+One can also take one column out of every three, using an appropriate outer or inner stride depending on the actual storage order:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Tutorial_SlicingCol.cpp
+</td>
+<td>
+\verbinclude Tutorial_SlicingCol.out
+</td></tr></table>
+
+*/
+
+}
diff --git a/doc/TutorialSparse.dox b/doc/TutorialSparse.dox
index fb07adaa2..352907408 100644
--- a/doc/TutorialSparse.dox
+++ b/doc/TutorialSparse.dox
@@ -241,11 +241,11 @@ In the following \em sm denotes a sparse matrix, \em sv a sparse vector, \em dm
sm1.real() sm1.imag() -sm1 0.5*sm1
sm1+sm2 sm1-sm2 sm1.cwiseProduct(sm2)
\endcode
-However, a strong restriction is that the storage orders must match. For instance, in the following example:
+However, <strong>a strong restriction is that the storage orders must match</strong>. For instance, in the following example:
\code
sm4 = sm1 + sm2 + sm3;
\endcode
-sm1, sm2, and sm3 must all be row-major or all column major.
+sm1, sm2, and sm3 must all be row-major or all column-major.
On the other hand, there is no restriction on the target matrix sm4.
For instance, this means that for computing \f$ A^T + A \f$, the matrix \f$ A^T \f$ must be evaluated into a temporary matrix of compatible storage order:
\code
@@ -257,7 +257,14 @@ Binary coefficient wise operators can also mix sparse and dense expressions:
\code
sm2 = sm1.cwiseProduct(dm1);
dm2 = sm1 + dm1;
+dm2 = dm1 - sm1;
\endcode
+Performance-wise, adding/subtracting a sparse and a dense matrix is better performed in two steps. For instance, instead of doing <tt>dm2 = sm1 + dm1</tt>, better write:
+\code
+dm2 = dm1;
+dm2 += sm1;
+\endcode
+This version fully exploits the higher performance of dense storage (no indirection, SIMD, etc.), and pays the cost of slow sparse evaluation only on the few non-zeros of the sparse matrix.
%Sparse expressions also support transposition:
@@ -304,6 +311,26 @@ sm2 = sm1.transpose() * P;
\endcode
+\subsection TutorialSparse_SubMatrices Block operations
+
+Regarding read access, sparse matrices expose the same API as dense matrices for accessing sub-matrices such as blocks, columns, and rows. See \ref TutorialBlockOperations for a detailed introduction.
+However, for performance reasons, writing to a sparse sub-matrix is much more limited, and currently only contiguous sets of columns (resp. rows) of a column-major (resp. row-major) SparseMatrix are writable. Moreover, this information has to be known at compile-time, leaving out methods such as <tt>block(...)</tt> and <tt>corner*(...)</tt>. The available API for write access to a SparseMatrix is summarized below:
+\code
+SparseMatrix<double,ColMajor> sm1;
+sm1.col(j) = ...;
+sm1.leftCols(ncols) = ...;
+sm1.middleCols(j,ncols) = ...;
+sm1.rightCols(ncols) = ...;
+
+SparseMatrix<double,RowMajor> sm2;
+sm2.row(i) = ...;
+sm2.topRows(nrows) = ...;
+sm2.middleRows(i,nrows) = ...;
+sm2.bottomRows(nrows) = ...;
+\endcode
+
+In addition, sparse matrices expose the SparseMatrixBase::innerVector() and SparseMatrixBase::innerVectors() methods, which are aliases to the col/middleCols methods for a column-major storage, and to the row/middleRows methods for a row-major storage.
+
\subsection TutorialSparse_TriangularSelfadjoint Triangular and selfadjoint views
Just as with dense matrices, the triangularView() function can be used to address a triangular part of the matrix, and perform triangular solves with a dense right hand side:
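A small sketch of these write-access rules (values are illustrative):
\code
#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> sm1(4,4); // column-major (default)
  Eigen::SparseMatrix<double> sm2(4,2);
  sm2.insert(0,0) = 1.0;
  sm2.insert(3,1) = 2.0;

  sm1.leftCols(2) = sm2;   // OK: a contiguous set of columns is writable
  sm1.col(3) = sm2.col(0); // OK: a single column is writable
  // sm1.row(1) = ...;     // not possible: rows of a column-major SparseMatrix are read-only
  std::cout << sm1 << std::endl;
  std::cout << sm1.innerVector(3) << std::endl; // alias for sm1.col(3) here
}
\endcode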
diff --git a/doc/UnalignedArrayAssert.dox b/doc/UnalignedArrayAssert.dox
index 8c97d7874..f0f84d25f 100644
--- a/doc/UnalignedArrayAssert.dox
+++ b/doc/UnalignedArrayAssert.dox
@@ -7,8 +7,8 @@ Hello! You are seeing this webpage because your program terminated on an asserti
my_program: path/to/eigen/Eigen/src/Core/DenseStorage.h:44:
Eigen::internal::matrix_array<T, Size, MatrixOptions, Align>::internal::matrix_array()
[with T = double, int Size = 2, int MatrixOptions = 2, bool Align = true]:
-Assertion `(reinterpret_cast<size_t>(array) & 0xf) == 0 && "this assertion
-is explained here: http://eigen.tuxfamily.org/dox/UnalignedArrayAssert.html
+Assertion `(reinterpret_cast<size_t>(array) & (sizemask)) == 0 && "this assertion
+is explained here: http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html
**** READ THIS WEB PAGE !!! ****"' failed.
</pre>
@@ -46,9 +46,9 @@ then you need to read this separate page: \ref TopicStructHavingEigenMembers "St
Note that here, Eigen::Vector2d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
-\section c2 Cause 2: STL Containers
+\section c2 Cause 2: STL Containers or manual memory allocation
-If you use STL Containers such as std::vector, std::map, ..., with Eigen objects, or with classes containing Eigen objects, like this,
+If you use STL Containers such as std::vector, std::map, ..., with %Eigen objects, or with classes containing %Eigen objects, like this,
\code
std::vector<Eigen::Matrix2f> my_vector;
@@ -60,6 +60,8 @@ then you need to read this separate page: \ref TopicStlContainers "Using STL Con
Note that here, Eigen::Matrix2f is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member".
+The same issue will be exhibited by any class or function bypassing operator new to allocate memory, that is, performing custom memory allocation followed by a call to the placement new operator. This is typically the case of \c std::make_shared or \c std::allocate_shared, for which the solution is to use an \ref aligned_allocator "aligned allocator" as detailed in the \ref TopicStlContainers "solution for STL containers".
+
\section c3 Cause 3: Passing Eigen objects by value
If some function in your code is getting an Eigen object passed by value, like this,
@@ -107,7 +109,10 @@ Two possibilities:
128-bit alignment code and thus preserves ABI compatibility, but completely disables vectorization.</li>
</ul>
-For more information, see <a href="http://eigen.tuxfamily.org/index.php?title=FAQ#I_disabled_vectorization.2C_but_I.27m_still_getting_annoyed_about_alignment_issues.21">this FAQ</a>.
+If you want to know why defining EIGEN_DONT_VECTORIZE does not by itself disable 128-bit alignment and the assertion, here's the explanation:
+
+It doesn't disable the assertion, because otherwise code that runs fine without vectorization would suddenly crash when enabling vectorization.
+It doesn't disable 128-bit alignment, because that would mean that vectorized and non-vectorized code would not be mutually ABI-compatible. This ABI compatibility is very important, even for people who develop only an in-house application, since, for instance, one may want to have both a vectorized path and a non-vectorized path in the same application.
*/
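A sketch of the std::make_shared pitfall and the suggested aligned-allocator workaround (assuming C++11):
\code
#include <Eigen/Dense>
#include <memory>

struct Foo { Eigen::Vector4f v; }; // fixed-size vectorizable member

int main()
{
  // std::make_shared<Foo>() may construct Foo at a misaligned address, because it
  // performs its own allocation followed by a placement new. Use an aligned allocator:
  std::shared_ptr<Foo> foo = std::allocate_shared<Foo>(Eigen::aligned_allocator<Foo>());
  foo->v.setZero();
}
\endcode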
diff --git a/doc/UsingIntelMKL.dox b/doc/UsingIntelMKL.dox
index 84db992b6..dbe559e53 100644
--- a/doc/UsingIntelMKL.dox
+++ b/doc/UsingIntelMKL.dox
@@ -52,10 +52,10 @@ When doing so, a number of Eigen's algorithms are silently substituted with call
These substitutions apply only for \b Dynamic \b or \b large enough objects with one of the following four standard scalar types: \c float, \c double, \c complex<float>, and \c complex<double>.
Operations on other scalar types or mixing reals and complexes will continue to use the built-in algorithms.
-In addition you can coarsely select choose which parts will be substituted by defining one or multiple of the following macros:
+In addition you can choose which parts will be substituted by defining one or more of the following macros:
<table class="manual">
-<tr><td>\c EIGEN_USE_BLAS </td><td>Enables the use of external BLAS level 2 and 3 routines (currently works with Intel MKL only)</td></tr>
+<tr><td>\c EIGEN_USE_BLAS </td><td>Enables the use of external BLAS level 2 and 3 routines (compatible with any F77 BLAS interface, not only Intel MKL)</td></tr>
<tr class="alt"><td>\c EIGEN_USE_LAPACKE </td><td>Enables the use of external Lapack routines via the <a href="http://www.netlib.org/lapack/lapacke.html">Intel Lapacke</a> C interface to Lapack (currently works with Intel MKL only)</td></tr>
<tr><td>\c EIGEN_USE_LAPACKE_STRICT </td><td>Same as \c EIGEN_USE_LAPACKE but algorithm of lower robustness are disabled. This currently concerns only JacobiSVD which otherwise would be replaced by \c gesvd that is less robust than Jacobi rotations.</td></tr>
<tr class="alt"><td>\c EIGEN_USE_MKL_VML </td><td>Enables the use of Intel VML (vector operations)</td></tr>
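A sketch of how these macros are meant to be used: define them before including any Eigen header, typically on the compiler command line together with the appropriate link flags (here EIGEN_USE_BLAS as an example):
\code
// Compile and link against a BLAS library, e.g.: -DEIGEN_USE_BLAS -lblas
#define EIGEN_USE_BLAS
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(512,512);
  Eigen::MatrixXd b = Eigen::MatrixXd::Random(512,512);
  Eigen::MatrixXd c = a * b; // large enough: dispatched to the external BLAS gemm
}
\endcode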
diff --git a/doc/snippets/Cwise_erf.cpp b/doc/snippets/Cwise_erf.cpp
new file mode 100644
index 000000000..7f51c1b6a
--- /dev/null
+++ b/doc/snippets/Cwise_erf.cpp
@@ -0,0 +1,2 @@
+Array4d v(-0.5,2,0,-7);
+cout << v.erf() << endl;
diff --git a/doc/snippets/Cwise_erfc.cpp b/doc/snippets/Cwise_erfc.cpp
new file mode 100644
index 000000000..f0453d4b1
--- /dev/null
+++ b/doc/snippets/Cwise_erfc.cpp
@@ -0,0 +1,2 @@
+Array4d v(-0.5,2,0,-7);
+cout << v.erfc() << endl;
diff --git a/doc/snippets/Cwise_lgamma.cpp b/doc/snippets/Cwise_lgamma.cpp
new file mode 100644
index 000000000..cbc69b989
--- /dev/null
+++ b/doc/snippets/Cwise_lgamma.cpp
@@ -0,0 +1,2 @@
+Array4d v(0.5,10,0,-1);
+cout << v.lgamma() << endl;
\ No newline at end of file
diff --git a/doc/snippets/Cwise_sign.cpp b/doc/snippets/Cwise_sign.cpp
new file mode 100644
index 000000000..49920e4f1
--- /dev/null
+++ b/doc/snippets/Cwise_sign.cpp
@@ -0,0 +1,2 @@
+Array3d v(-3,5,0);
+cout << v.sign() << endl;
diff --git a/doc/snippets/MatrixBase_cwiseSign.cpp b/doc/snippets/MatrixBase_cwiseSign.cpp
new file mode 100644
index 000000000..efd717955
--- /dev/null
+++ b/doc/snippets/MatrixBase_cwiseSign.cpp
@@ -0,0 +1,4 @@
+MatrixXd m(2,3);
+m << 2, -4, 6,
+ -5, 1, 0;
+cout << m.cwiseSign() << endl;
diff --git a/doc/snippets/TopicAliasing_mult4.cpp b/doc/snippets/TopicAliasing_mult4.cpp
new file mode 100644
index 000000000..8a8992f6c
--- /dev/null
+++ b/doc/snippets/TopicAliasing_mult4.cpp
@@ -0,0 +1,5 @@
+MatrixXf A(2,2), B(3,2);
+B << 2, 0, 0, 3, 1, 1;
+A << 2, 0, 0, -2;
+A = (B * A).cwiseAbs();
+cout << A;
\ No newline at end of file
diff --git a/doc/snippets/TopicAliasing_mult5.cpp b/doc/snippets/TopicAliasing_mult5.cpp
new file mode 100644
index 000000000..1a36defde
--- /dev/null
+++ b/doc/snippets/TopicAliasing_mult5.cpp
@@ -0,0 +1,5 @@
+MatrixXf A(2,2), B(3,2);
+B << 2, 0, 0, 3, 1, 1;
+A << 2, 0, 0, -2;
+A = (B * A).eval().cwiseAbs();
+cout << A;
diff --git a/doc/snippets/Tutorial_AdvancedInitialization_Join.cpp b/doc/snippets/Tutorial_AdvancedInitialization_Join.cpp
index 84e8715cb..55a21539d 100644
--- a/doc/snippets/Tutorial_AdvancedInitialization_Join.cpp
+++ b/doc/snippets/Tutorial_AdvancedInitialization_Join.cpp
@@ -3,7 +3,7 @@ vec1 << 1, 2, 3;
std::cout << "vec1 = " << vec1 << std::endl;
RowVectorXd vec2(4);
-vec2 << 1, 4, 9, 16;;
+vec2 << 1, 4, 9, 16;
std::cout << "vec2 = " << vec2 << std::endl;
RowVectorXd joined(7);
diff --git a/doc/snippets/Tutorial_ReshapeMat2Mat.cpp b/doc/snippets/Tutorial_ReshapeMat2Mat.cpp
new file mode 100644
index 000000000..f84d6e76d
--- /dev/null
+++ b/doc/snippets/Tutorial_ReshapeMat2Mat.cpp
@@ -0,0 +1,6 @@
+MatrixXf M1(2,6); // Column-major storage
+M1 << 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12;
+
+Map<MatrixXf> M2(M1.data(), 6,2);
+cout << "M2:" << endl << M2 << endl; \ No newline at end of file
diff --git a/doc/snippets/Tutorial_ReshapeMat2Vec.cpp b/doc/snippets/Tutorial_ReshapeMat2Vec.cpp
new file mode 100644
index 000000000..95bd4e0e6
--- /dev/null
+++ b/doc/snippets/Tutorial_ReshapeMat2Vec.cpp
@@ -0,0 +1,11 @@
+MatrixXf M1(3,3); // Column-major storage
+M1 << 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9;
+
+Map<RowVectorXf> v1(M1.data(), M1.size());
+cout << "v1:" << endl << v1 << endl;
+
+Matrix<float,Dynamic,Dynamic,RowMajor> M2(M1);
+Map<RowVectorXf> v2(M2.data(), M2.size());
+cout << "v2:" << endl << v2 << endl; \ No newline at end of file
diff --git a/doc/snippets/Tutorial_SlicingCol.cpp b/doc/snippets/Tutorial_SlicingCol.cpp
new file mode 100644
index 000000000..f667ff689
--- /dev/null
+++ b/doc/snippets/Tutorial_SlicingCol.cpp
@@ -0,0 +1,11 @@
+MatrixXf M1 = MatrixXf::Random(3,8);
+cout << "Column major input:" << endl << M1 << "\n";
+Map<MatrixXf,0,OuterStride<> > M2(M1.data(), M1.rows(), (M1.cols()+2)/3, OuterStride<>(M1.outerStride()*3));
+cout << "1 column over 3:" << endl << M2 << "\n";
+
+typedef Matrix<float,Dynamic,Dynamic,RowMajor> RowMajorMatrixXf;
+RowMajorMatrixXf M3(M1);
+cout << "Row major input:" << endl << M3 << "\n";
+Map<RowMajorMatrixXf,0,Stride<Dynamic,3> > M4(M3.data(), M3.rows(), (M3.cols()+2)/3,
+ Stride<Dynamic,3>(M3.outerStride(),3));
+cout << "1 column over 3:" << endl << M4 << "\n"; \ No newline at end of file
diff --git a/doc/snippets/Tutorial_SlicingVec.cpp b/doc/snippets/Tutorial_SlicingVec.cpp
new file mode 100644
index 000000000..07e10bf69
--- /dev/null
+++ b/doc/snippets/Tutorial_SlicingVec.cpp
@@ -0,0 +1,4 @@
+RowVectorXf v = RowVectorXf::LinSpaced(20,0,19);
+cout << "Input:" << endl << v << endl;
+Map<RowVectorXf,0,InnerStride<2> > v2(v.data(), v.size()/2);
+cout << "Even:" << v2 << endl; \ No newline at end of file