author     Gael Guennebaud <g.gael@free.fr>   2012-12-24 13:33:22 +0100
committer  Gael Guennebaud <g.gael@free.fr>   2012-12-24 13:33:22 +0100
commit     f41d96deb9b64e74cd2d8c48a698f4443ce9bbab
tree       bb36884185b0bfa99c16338403fc5736cbd7eba9 /doc
parent     f450303321c4d37d908d5d9e6fcf480f27bf53e3
Fix several documentation issues
Diffstat (limited to 'doc')
doc/C03_TutorialArrayClass.dox     | 6
doc/C09_TutorialSparse.dox         | 4
doc/C10_TutorialMapClass.dox       | 4
doc/Doxyfile.in                    | 7
doc/I14_PreprocessorDirectives.dox | 4
doc/I17_SparseLinearSystems.dox    | 6
doc/QuickReference.dox             | 8
doc/UsingIntelMKL.dox              | 2
8 files changed, 21 insertions, 20 deletions
diff --git a/doc/C03_TutorialArrayClass.dox b/doc/C03_TutorialArrayClass.dox
index a1d8d6985..d15d6d515 100644
--- a/doc/C03_TutorialArrayClass.dox
+++ b/doc/C03_TutorialArrayClass.dox
@@ -122,7 +122,7 @@ arrays can be multiplied if and only if they have the same dimensions.
 The Array class defines other coefficient-wise operations besides the addition, subtraction and multiplication
 operators described above. For example, the \link ArrayBase::abs() .abs() \endlink method takes the absolute
 value of each coefficient, while \link ArrayBase::sqrt() .sqrt() \endlink computes the square root of the
-coefficients. If you have two arrays of the same size, you can call \link ArrayBase::min() .min() \endlink to
+coefficients. If you have two arrays of the same size, you can call \link ArrayBase::min(const Eigen::ArrayBase<OtherDerived>&) const .min(.) \endlink to
 construct the array whose coefficients are the minimum of the corresponding coefficients of the two given
 arrays. These operations are illustrated in the following example.
@@ -168,8 +168,8 @@ The following example shows how to use array operations on a Matrix object by em
 * to multiply them coefficient-wise and assigns the result to the matrix variable \c result (this is legal
   because Eigen allows assigning array expressions to matrix variables).
 
-As a matter of fact, this usage case is so common that Eigen provides a \link MatrixBase::cwiseProduct()
-.cwiseProduct() \endlink method for matrices to compute the coefficient-wise product. This is also shown in
+As a matter of fact, this usage case is so common that Eigen provides a \link MatrixBase::cwiseProduct() const
+.cwiseProduct(.) \endlink method for matrices to compute the coefficient-wise product. This is also shown in
 the example program.
 
 <table class="example">
diff --git a/doc/C09_TutorialSparse.dox b/doc/C09_TutorialSparse.dox
index 6a16c3ae2..a476c4a47 100644
--- a/doc/C09_TutorialSparse.dox
+++ b/doc/C09_TutorialSparse.dox
@@ -25,10 +25,10 @@ Manipulating and solving sparse problems involves various modules which are summ
 <table class="manual">
 <tr><th>Module</th><th>Header file</th><th>Contents</th></tr>
-<tr><td>\link Sparse_Module SparseCore \endlink</td><td>\code#include <Eigen/SparseCore>\endcode</td><td>SparseMatrix and SparseVector classes, matrix assembly, basic sparse linear algebra (including sparse triangular solvers)</td></tr>
+<tr><td>\link SparseCore_Module SparseCore \endlink</td><td>\code#include <Eigen/SparseCore>\endcode</td><td>SparseMatrix and SparseVector classes, matrix assembly, basic sparse linear algebra (including sparse triangular solvers)</td></tr>
 <tr><td>\link SparseCholesky_Module SparseCholesky \endlink</td><td>\code#include <Eigen/SparseCholesky>\endcode</td><td>Direct sparse LLT and LDLT Cholesky factorization to solve sparse self-adjoint positive definite problems</td></tr>
 <tr><td>\link IterativeLinearSolvers_Module IterativeLinearSolvers \endlink</td><td>\code#include <Eigen/IterativeLinearSolvers>\endcode</td><td>Iterative solvers to solve large general linear square problems (including self-adjoint positive definite problems)</td></tr>
-<tr><td></td><td>\code#include <Eigen/Sparse>\endcode</td><td>Includes all the above modules</td></tr>
+<tr><td>\link Sparse_modules Sparse \endlink</td><td>\code#include <Eigen/Sparse>\endcode</td><td>Includes all the above modules</td></tr>
 </table>
 
 \section TutorialSparseIntro Sparse matrix representation
diff --git a/doc/C10_TutorialMapClass.dox b/doc/C10_TutorialMapClass.dox
index 09e792792..f4b5db39c 100644
--- a/doc/C10_TutorialMapClass.dox
+++ b/doc/C10_TutorialMapClass.dox
@@ -5,7 +5,7 @@ namespace Eigen {
 \ingroup Tutorial
 
 \li \b Previous: \ref TutorialSparse
-\li \b Next: \ref TODO
+\li \b Next:
 
 This tutorial page explains how to work with "raw" C++ arrays. This can be useful in a variety of contexts,
 particularly when "importing" vectors and matrices from other libraries into Eigen.
@@ -89,7 +89,7 @@ for (int i = 0; i < n_matrices; i++)
 }
 \endcode
 
-\li \b Next: \ref TODO
+\li \b Next:
 
 */
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 167e65067..87518221c 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -1214,7 +1214,9 @@ PREDEFINED = EIGEN_EMPTY_STRUCT \
                          EIGEN_VECTORIZE \
                          EIGEN_QT_SUPPORT \
                          EIGEN_STRONG_INLINE=inline \
-                         EIGEN2_SUPPORT_STAGE=99
+                         "EIGEN2_SUPPORT_STAGE=99" \
+                         "EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR)=template<typename OtherDerived> const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> METHOD(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const;" \
+                         "EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS)=CwiseBinaryOp<internal::scalar_product_op<typename LHS::Scalar, typename RHS::Scalar >, const LHS, const RHS>"
 
 # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
 # this tag can be used to specify a list of macro names that should be expanded.
@@ -1224,11 +1226,10 @@ PREDEFINED = EIGEN_EMPTY_STRUCT \
 EXPAND_AS_DEFINED      = EIGEN_MAKE_TYPEDEFS \
                          EIGEN_MAKE_FIXED_TYPEDEFS \
                          EIGEN_MAKE_TYPEDEFS_ALL_SIZES \
-                         EIGEN_MAKE_CWISE_BINARY_OP \
                          EIGEN_CWISE_UNOP_RETURN_TYPE \
                          EIGEN_CWISE_BINOP_RETURN_TYPE \
-                         EIGEN_CWISE_PRODUCT_RETURN_TYPE \
                          EIGEN_CURRENT_STORAGE_BASE_CLASS \
+                         EIGEN_MATHFUNC_IMPL \
                          _EIGEN_GENERIC_PUBLIC_INTERFACE \
                          EIGEN2_SUPPORT
diff --git a/doc/I14_PreprocessorDirectives.dox b/doc/I14_PreprocessorDirectives.dox
index f29f0720c..948f352d7 100644
--- a/doc/I14_PreprocessorDirectives.dox
+++ b/doc/I14_PreprocessorDirectives.dox
@@ -29,8 +29,8 @@ are doing.
   Eigen3; see \ref Eigen2SupportModes.
 - \b EIGEN_DEFAULT_DENSE_INDEX_TYPE - the type for column and row indices in matrices, vectors and array
   (DenseBase::Index). Set to \c std::ptrdiff_t by default.
- - \b EIGEN_DEFAULT_IO_FORMAT - the IOFormat to use when printing a matrix if no #IOFormat is specified.
-   Defaults to the #IOFormat constructed by the default constructor IOFormat().
+ - \b EIGEN_DEFAULT_IO_FORMAT - the IOFormat to use when printing a matrix if no %IOFormat is specified.
+   Defaults to the %IOFormat constructed by the default constructor IOFormat::IOFormat().
 - \b EIGEN_INITIALIZE_MATRICES_BY_ZERO - if defined, all entries of newly constructed matrices and arrays
   are initializes to zero, as are new entries in matrices and arrays after resizing. Not defined by default.
 - \b EIGEN_NO_AUTOMATIC_RESIZING - if defined, the matrices (or arrays) on both sides of an assignment
diff --git a/doc/I17_SparseLinearSystems.dox b/doc/I17_SparseLinearSystems.dox
index cc8987d8a..e0179d1f6 100644
--- a/doc/I17_SparseLinearSystems.dox
+++ b/doc/I17_SparseLinearSystems.dox
@@ -39,7 +39,7 @@ Eigen provides a limited set of methods to reorder the matrix in this step, eith
 DirectSolverClassName<SparseMatrix<double>, OrderingMethod<IndexType> > solver;
 \endcode
 
-See \link Ordering_Modules the Ordering module \endlink for the list of available methods and the associated options.
+See the \link OrderingMethods_Module OrderingMethods module \endlink for the list of available methods and the associated options.
 
 In factorize(), the factors of the coefficient matrix are computed. This step should be called each time the values of the matrix change. However, the structural pattern of the matrix should not change between multiple calls.
@@ -64,10 +64,10 @@
 x1 = solver.solve(b1);
 x2 = solver.solve(b2);
 // ...
 \endcode
 
-For direct methods, the solution are computed at the machine precision. Sometimes, the solution need not be too accurate. In this case, the iterative methods are more suitable and the desired accuracy can be set before the solve step using setTolerance(). For all the available functions, please, refer to the documentation of the \link IterativeLinearSolvers_module Iterative solvers module \endlink.
+For direct methods, the solution are computed at the machine precision. Sometimes, the solution need not be too accurate. In this case, the iterative methods are more suitable and the desired accuracy can be set before the solve step using setTolerance(). For all the available functions, please, refer to the documentation of the \link IterativeLinearSolvers_Module Iterative solvers module \endlink.
 
 \section BenchmarkRoutine
-Most of the time, all you need is to know how much time it will take to qolve your system, and hopefully, what is the most suitable solver. In Eigen, we provide a benchmark routine that can be used for this purpose. It is very easy to use. First, it should be activated at the configuration step with the flag TEST_REAL_CASES. Then, in bench/spbench, you can compile the routine by typing \b make \e spbenchsolver. You can then run it with --help option to get the list of all available options. Basically, the matrices to test should be in \link http://math.nist.gov/MatrixMarket/formats.html MatrixMarket Coordinate format \endlink, and the routine returns the statistics from all available solvers in Eigen.
+Most of the time, all you need is to know how much time it will take to qolve your system, and hopefully, what is the most suitable solver. In Eigen, we provide a benchmark routine that can be used for this purpose. It is very easy to use. First, it should be activated at the configuration step with the flag TEST_REAL_CASES. Then, in bench/spbench, you can compile the routine by typing \b make \e spbenchsolver. You can then run it with --help option to get the list of all available options. Basically, the matrices to test should be in <a href="http://math.nist.gov/MatrixMarket/formats.html">MatrixMarket Coordinate format</a>, and the routine returns the statistics from all available solvers in Eigen.
 
 The following table gives an example of XHTML statistics from several Eigen built-in and external solvers.
 <TABLE border="1">
diff --git a/doc/QuickReference.dox b/doc/QuickReference.dox
index 31b8030f4..f91b96e52 100644
--- a/doc/QuickReference.dox
+++ b/doc/QuickReference.dox
@@ -31,7 +31,7 @@ The Eigen library is divided in a Core module and several additional modules. Ea
 <tr><td>\link SVD_Module SVD \endlink</td><td>\code#include <Eigen/SVD>\endcode</td><td>SVD decomposition with least-squares solver (JacobiSVD)</td></tr>
 <tr class="alt"><td>\link QR_Module QR \endlink</td><td>\code#include <Eigen/QR>\endcode</td><td>QR decomposition with solver (HouseholderQR, ColPivHouseholderQR, FullPivHouseholderQR)</td></tr>
 <tr><td>\link Eigenvalues_Module Eigenvalues \endlink</td><td>\code#include <Eigen/Eigenvalues>\endcode</td><td>Eigenvalue, eigenvector decompositions (EigenSolver, SelfAdjointEigenSolver, ComplexEigenSolver)</td></tr>
-<tr class="alt"><td>\link Sparse_Module Sparse \endlink</td><td>\code#include <Eigen/Sparse>\endcode</td><td>%Sparse matrix storage and related basic linear algebra (SparseMatrix, DynamicSparseMatrix, SparseVector)</td></tr>
+<tr class="alt"><td>\link Sparse_modules Sparse \endlink</td><td>\code#include <Eigen/Sparse>\endcode</td><td>%Sparse matrix storage and related basic linear algebra (SparseMatrix, DynamicSparseMatrix, SparseVector)</td></tr>
 <tr><td></td><td>\code#include <Eigen/Dense>\endcode</td><td>Includes Core, Geometry, LU, Cholesky, SVD, QR, and Eigenvalues header files</td></tr>
 <tr class="alt"><td></td><td>\code#include <Eigen/Eigen>\endcode</td><td>Includes %Dense and %Sparse header files (the whole Eigen library)</td></tr>
 </table>
@@ -89,8 +89,8 @@ MatrixWrapper<Array44f> a1m(a1);
 \endcode
 
 In the rest of this document we will use the following symbols to emphasize the features which are specifics to a given kind of object:
-\li <a name="matrixonly"><a/>\matrixworld linear algebra matrix and vector only
-\li <a name="arrayonly"><a/>\arrayworld array objects only
+\li <a name="matrixonly"></a>\matrixworld linear algebra matrix and vector only
+\li <a name="arrayonly"></a>\arrayworld array objects only
 
 \subsection QuickRef_Basics Basic matrix manipulation
@@ -458,7 +458,7 @@ mat = 2 7 8
 \endcode</td></tr>
 </table>
 
-Special versions of \link DenseBase::minCoeff(Index*,Index*) minCoeff \endlink and \link DenseBase::maxCoeff(Index*,Index*) maxCoeff \endlink:
+Special versions of \link DenseBase::minCoeff(IndexType*,IndexType*) const minCoeff \endlink and \link DenseBase::maxCoeff(IndexType*,IndexType*) const maxCoeff \endlink:
 \code
 int i, j;
 s = vector.minCoeff(&i);  // s == vector[i]
diff --git a/doc/UsingIntelMKL.dox b/doc/UsingIntelMKL.dox
index 379ee3ffd..f4444335d 100644
--- a/doc/UsingIntelMKL.dox
+++ b/doc/UsingIntelMKL.dox
@@ -61,7 +61,7 @@ In addition you can coarsely select choose which parts will be substituted by de
 <tr><td>\c EIGEN_USE_MKL_ALL </td><td>Defines \c EIGEN_USE_BLAS, \c EIGEN_USE_LAPACKE, and \c EIGEN_USE_MKL_VML </td></tr>
 </table>
 
-Finally, the PARDISO sparse solver shipped with Intel MKL can be used through the \ref PardisoLU, \ref PardisoLLT and \ref PardisoLDLT classes of the \ref PARDISOSupport_Module.
+Finally, the PARDISO sparse solver shipped with Intel MKL can be used through the \ref PardisoLU, \ref PardisoLLT and \ref PardisoLDLT classes of the \ref PardisoSupport_Module.
 
 \section TopicUsingIntelMKL_SupportedFeatures List of supported features
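The C03_TutorialArrayClass.dox hunks above re-point the `.min(.)` and `.cwiseProduct(.)` links at their parametrized overloads. As a quick illustration of the API those links refer to, a minimal sketch (variable names and values are made up here, not taken from the patch):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Coefficient-wise minimum of two arrays of the same size.
  Eigen::ArrayXXf a(2, 2), b(2, 2);
  a << 1, 4,
       3, 2;
  b << 2, 1,
       5, 0;
  std::cout << "a.min(b) =\n" << a.min(b) << "\n\n";

  // The analogous coefficient-wise product on matrices uses cwiseProduct().
  Eigen::MatrixXf m = Eigen::MatrixXf::Constant(2, 2, 2.0f);
  Eigen::MatrixXf n = Eigen::MatrixXf::Constant(2, 2, 3.0f);
  std::cout << "m.cwiseProduct(n) =\n" << m.cwiseProduct(n) << "\n";
}
```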
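The I17_SparseLinearSystems.dox hunks describe the factorize-once, solve-many workflow in which one factorization is reused for several right-hand sides. A minimal sketch of that pattern, assuming the matrix is sparse, self-adjoint and positive definite so that SimplicialLDLT applies (the function name and solver choice are illustrative only):

```cpp
#include <Eigen/Dense>
#include <Eigen/Sparse>

// Factorize A once, then reuse the factorization for two right-hand sides.
void solveTwice(const Eigen::SparseMatrix<double>& A,
                const Eigen::VectorXd& b1, const Eigen::VectorXd& b2,
                Eigen::VectorXd& x1, Eigen::VectorXd& x2)
{
  Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > solver;
  solver.analyzePattern(A); // ordering + symbolic factorization (pattern only)
  solver.factorize(A);      // numerical factorization; redo when values change
  x1 = solver.solve(b1);
  x2 = solver.solve(b2);    // no refactorization needed for additional rhs
}
```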