-rw-r--r--  Eigen/Core | 2
-rw-r--r--  Eigen/src/Cholesky/LDLT.h | 2
-rw-r--r--  Eigen/src/Cholesky/LLT.h | 2
-rw-r--r--  Eigen/src/Core/AssignEvaluator.h | 4
-rw-r--r--  Eigen/src/Core/DenseBase.h | 2
-rw-r--r--  Eigen/src/Core/DenseStorage.h | 2
-rw-r--r--  Eigen/src/Core/MathFunctions.h | 2
-rw-r--r--  Eigen/src/Core/NoAlias.h | 4
-rw-r--r--  Eigen/src/Core/PlainObjectBase.h | 2
-rw-r--r--  Eigen/src/Core/Product.h | 2
-rw-r--r--  Eigen/src/Core/Transpositions.h | 2
-rw-r--r--  Eigen/src/Core/TriangularMatrix.h | 4
-rwxr-xr-x  Eigen/src/Core/arch/AltiVec/PacketMath.h | 4
-rw-r--r--  Eigen/src/Core/arch/SSE/MathFunctions.h | 2
-rw-r--r--  Eigen/src/Core/products/GeneralBlockPanelKernel.h | 4
-rw-r--r--  Eigen/src/Core/products/GeneralMatrixVector.h | 2
-rw-r--r--  Eigen/src/Core/products/Parallelizer.h | 2
-rw-r--r--  Eigen/src/Core/products/SelfadjointMatrixVector.h | 2
-rw-r--r--  Eigen/src/Core/util/Macros.h | 4
-rw-r--r--  Eigen/src/Core/util/Memory.h | 2
-rwxr-xr-x  Eigen/src/Core/util/Meta.h | 2
-rw-r--r--  Eigen/src/Eigenvalues/ComplexEigenSolver.h | 2
-rw-r--r--  Eigen/src/Eigenvalues/ComplexSchur.h | 2
-rw-r--r--  Eigen/src/Eigenvalues/EigenSolver.h | 2
-rw-r--r--  Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h | 2
-rw-r--r--  Eigen/src/Eigenvalues/RealQZ.h | 2
-rw-r--r--  Eigen/src/Eigenvalues/RealSchur.h | 2
-rw-r--r--  Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h | 2
-rwxr-xr-x  Eigen/src/Geometry/Scaling.h | 2
-rw-r--r--  Eigen/src/IterativeLinearSolvers/IncompleteLUT.h | 4
-rw-r--r--  Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h | 2
-rw-r--r--  Eigen/src/KLUSupport/KLUSupport.h | 2
-rw-r--r--  Eigen/src/LU/FullPivLU.h | 2
-rw-r--r--  Eigen/src/LU/PartialPivLU.h | 4
-rw-r--r--  Eigen/src/OrderingMethods/Eigen_Colamd.h | 6
-rw-r--r--  Eigen/src/PaStiXSupport/PaStiXSupport.h | 2
-rw-r--r--  Eigen/src/PardisoSupport/PardisoSupport.h | 2
-rw-r--r--  Eigen/src/QR/ColPivHouseholderQR.h | 2
-rw-r--r--  Eigen/src/QR/CompleteOrthogonalDecomposition.h | 2
-rw-r--r--  Eigen/src/SPQRSupport/SuiteSparseQRSupport.h | 2
-rw-r--r--  Eigen/src/SVD/BDCSVD.h | 2
-rw-r--r--  Eigen/src/SVD/UpperBidiagonalization.h | 4
-rw-r--r--  Eigen/src/SparseCholesky/SimplicialCholesky.h | 2
-rw-r--r--  Eigen/src/SparseCore/SparseMatrix.h | 6
-rw-r--r--  Eigen/src/SparseCore/SparseProduct.h | 2
-rw-r--r--  Eigen/src/SparseCore/SparseVector.h | 2
-rw-r--r--  Eigen/src/SparseLU/SparseLU.h | 2
-rw-r--r--  Eigen/src/SparseLU/SparseLU_Memory.h | 2
-rw-r--r--  Eigen/src/SparseLU/SparseLU_column_dfs.h | 4
-rw-r--r--  Eigen/src/SparseLU/SparseLU_gemm_kernel.h | 2
-rw-r--r--  Eigen/src/SparseLU/SparseLU_panel_bmod.h | 2
-rw-r--r--  Eigen/src/SuperLUSupport/SuperLUSupport.h | 2
-rw-r--r--  Eigen/src/UmfPackSupport/UmfPackSupport.h | 2
-rw-r--r--  Eigen/src/plugins/IndexedViewMethods.h | 2
-rw-r--r--  bench/analyze-blocking-sizes.cpp | 2
-rw-r--r--  bench/btl/README | 2
-rw-r--r--  bench/btl/generic_bench/bench.hh | 2
-rw-r--r--  bench/btl/generic_bench/utils/size_log.hh | 2
-rw-r--r--  bench/btl/generic_bench/utils/xy_file.hh | 2
-rw-r--r--  bench/btl/libs/ublas/ublas_interface.hh | 2
-rw-r--r--  bench/eig33.cpp | 2
-rw-r--r--  bench/spbench/spbenchsolver.cpp | 2
-rw-r--r--  blas/f2c/ctbmv.c | 2
-rw-r--r--  blas/f2c/dtbmv.c | 2
-rw-r--r--  blas/f2c/stbmv.c | 2
-rw-r--r--  blas/f2c/ztbmv.c | 2
-rw-r--r--  blas/level1_impl.h | 2
-rw-r--r--  blas/testing/cblat1.f | 2
-rw-r--r--  blas/testing/dblat1.f | 2
-rw-r--r--  blas/testing/sblat1.f | 2
-rw-r--r--  blas/testing/zblat1.f | 2
-rw-r--r--  cmake/EigenConfigureTesting.cmake | 4
-rw-r--r--  cmake/EigenTesting.cmake | 10
-rw-r--r--  cmake/FindComputeCpp.cmake | 2
-rw-r--r--  cmake/FindEigen3.cmake | 2
-rw-r--r--  debug/msvc/eigen_autoexp_part.dat | 2
-rw-r--r--  doc/Doxyfile.in | 2
-rw-r--r--  doc/FunctionsTakingEigenTypes.dox | 4
-rw-r--r--  doc/PreprocessorDirectives.dox | 2
-rw-r--r--  doc/QuickStartGuide.dox | 2
-rw-r--r--  doc/SparseQuickReference.dox | 2
-rw-r--r--  doc/TemplateKeyword.dox | 2
-rw-r--r--  doc/TopicLazyEvaluation.dox | 2
-rw-r--r--  doc/TopicLinearAlgebraDecompositions.dox | 2
-rw-r--r--  doc/TopicMultithreading.dox | 2
-rw-r--r--  doc/TutorialMapClass.dox | 4
-rw-r--r--  doc/TutorialSparse.dox | 4
-rw-r--r--  doc/UnalignedArrayAssert.dox | 4
-rw-r--r--  doc/UsingNVCC.dox | 2
-rw-r--r--  doc/eigendoxy.css | 2
-rw-r--r--  doc/special_examples/Tutorial_sparse_example.cpp | 2
-rw-r--r--  lapack/CMakeLists.txt | 4
-rw-r--r--  test/CMakeLists.txt | 2
-rw-r--r--  test/bdcsvd.cpp | 2
-rw-r--r--  test/eigensolver_complex.cpp | 2
-rw-r--r--  test/geo_quaternion.cpp | 2
-rw-r--r--  test/main.h | 2
-rw-r--r--  test/packetmath.cpp | 2
98 files changed, 122 insertions, 122 deletions
diff --git a/Eigen/Core b/Eigen/Core
index 5a6dec8cc..a9bbfe276 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -360,7 +360,7 @@ inline static const char *SimdInstructionSetsInUse(void) {
namespace Eigen {
-// we use size_t frequently and we'll never remember to prepend it with std:: everytime just to
+// we use size_t frequently and we'll never remember to prepend it with std:: every time just to
// ensure QNX/QCC support
using std::size_t;
// gcc 4.6.0 wants std:: for ptrdiff_t
diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h
index 13a8f6d14..5be58377b 100644
--- a/Eigen/src/Cholesky/LDLT.h
+++ b/Eigen/src/Cholesky/LDLT.h
@@ -247,7 +247,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the factorization failed because of a zero pivot.
*/
ComputationInfo info() const
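
For readers of the patched documentation, a minimal sketch of the info() check described above (illustrative only, assuming a dense symmetric positive definite system; not part of the diff itself):

    #include <Eigen/Dense>
    #include <iostream>

    void solve_spd(const Eigen::MatrixXd& A, const Eigen::VectorXd& b)
    {
      Eigen::LDLT<Eigen::MatrixXd> ldlt(A);          // factorize A = L D L^T
      if (ldlt.info() != Eigen::Success) {           // NumericalIssue, e.g. on a zero pivot
        std::cerr << "LDLT factorization failed\n";
        return;
      }
      std::cout << ldlt.solve(b) << "\n";            // reuse the factorization to solve A x = b
    }
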
diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h
index 814174d47..22e4be75d 100644
--- a/Eigen/src/Cholesky/LLT.h
+++ b/Eigen/src/Cholesky/LLT.h
@@ -180,7 +180,7 @@ template<typename _MatrixType, int _UpLo> class LLT
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix.appears not to be positive definite.
*/
ComputationInfo info() const
diff --git a/Eigen/src/Core/AssignEvaluator.h b/Eigen/src/Core/AssignEvaluator.h
index ebf5590de..362d905d2 100644
--- a/Eigen/src/Core/AssignEvaluator.h
+++ b/Eigen/src/Core/AssignEvaluator.h
@@ -756,7 +756,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType
// AssignmentKind must define a Kind typedef.
template<typename DstShape, typename SrcShape> struct AssignmentKind;
-// Assignement kind defined in this file:
+// Assignment kind defined in this file:
struct Dense2Dense {};
struct EigenBase2EigenBase {};
@@ -899,7 +899,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, EigenBase2EigenBase, Weak>
src.evalTo(dst);
}
- // NOTE The following two functions are templated to avoid their instanciation if not needed
+ // NOTE The following two functions are templated to avoid their instantiation if not needed
// This is needed because some expressions supports evalTo only and/or have 'void' as scalar type.
template<typename SrcScalarType>
EIGEN_DEVICE_FUNC
diff --git a/Eigen/src/Core/DenseBase.h b/Eigen/src/Core/DenseBase.h
index fd933eed4..53b427b17 100644
--- a/Eigen/src/Core/DenseBase.h
+++ b/Eigen/src/Core/DenseBase.h
@@ -395,7 +395,7 @@ template<typename Derived> class DenseBase
* Notice that in the case of a plain matrix or vector (not an expression) this function just returns
* a const reference, in order to avoid a useless copy.
*
- * \warning Be carefull with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page \endlink.
+ * \warning Be careful with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page \endlink.
*/
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE EvalReturnType eval() const
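
A small illustrative sketch of the auto pitfall this warning points to (assumes dense double matrices; not taken from the patch):

    #include <Eigen/Dense>
    using Eigen::MatrixXd;

    int main()
    {
      MatrixXd A = MatrixXd::Random(3,3), B = MatrixXd::Random(3,3);
      auto P = A * B;               // P is an unevaluated product expression, still tied to A and B
      A.setIdentity();              // silently changes what P would evaluate to
      MatrixXd C = (A * B).eval();  // eval() forces evaluation into a plain temporary instead
      return C.size() == 9 ? 0 : 1;
    }
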
diff --git a/Eigen/src/Core/DenseStorage.h b/Eigen/src/Core/DenseStorage.h
index 7958feeb9..9e58fbf88 100644
--- a/Eigen/src/Core/DenseStorage.h
+++ b/Eigen/src/Core/DenseStorage.h
@@ -61,7 +61,7 @@ struct plain_array
#if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)
#elif EIGEN_GNUC_AT_LEAST(4,7)
- // GCC 4.7 is too aggressive in its optimizations and remove the alignement test based on the fact the array is declared to be aligned.
+ // GCC 4.7 is too aggressive in its optimizations and remove the alignment test based on the fact the array is declared to be aligned.
// See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900
// Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined:
template<typename PtrType>
diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h
index 1b864a405..e981129b2 100644
--- a/Eigen/src/Core/MathFunctions.h
+++ b/Eigen/src/Core/MathFunctions.h
@@ -749,7 +749,7 @@ inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
}
-// Implementatin of is* functions
+// Implementation of is* functions
// std::is* do not work with fast-math and gcc, std::is* are available on MSVC 2013 and newer, as well as in clang.
#if (EIGEN_HAS_CXX11_MATH && !(EIGEN_COMP_GNUC_STRICT && __FINITE_MATH_ONLY__)) || (EIGEN_COMP_MSVC>=1800) || (EIGEN_COMP_CLANG)
diff --git a/Eigen/src/Core/NoAlias.h b/Eigen/src/Core/NoAlias.h
index e94c8ee96..570283d90 100644
--- a/Eigen/src/Core/NoAlias.h
+++ b/Eigen/src/Core/NoAlias.h
@@ -75,10 +75,10 @@ class NoAlias
*
* More precisely, noalias() allows to bypass the EvalBeforeAssignBit flag.
* Currently, even though several expressions may alias, only product
- * expressions have this flag. Therefore, noalias() is only usefull when
+ * expressions have this flag. Therefore, noalias() is only useful when
* the source expression contains a matrix product.
*
- * Here are some examples where noalias is usefull:
+ * Here are some examples where noalias is useful:
* \code
* D.noalias() = A * B;
* D.noalias() += A.transpose() * B;
diff --git a/Eigen/src/Core/PlainObjectBase.h b/Eigen/src/Core/PlainObjectBase.h
index 1dc7e223a..6c0a42ec7 100644
--- a/Eigen/src/Core/PlainObjectBase.h
+++ b/Eigen/src/Core/PlainObjectBase.h
@@ -780,7 +780,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
resize(size);
}
- // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type can be implicitely converted)
+ // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type can be implicitly converted)
template<typename T>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _init1(const Scalar& val0, typename internal::enable_if<Base::SizeAtCompileTime==1 && internal::is_convertible<T, Scalar>::value,T>::type* = 0)
diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h
index 676c48027..3d67d9489 100644
--- a/Eigen/src/Core/Product.h
+++ b/Eigen/src/Core/Product.h
@@ -116,7 +116,7 @@ class dense_product_base
: public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type
{};
-/** Convertion to scalar for inner-products */
+/** Conversion to scalar for inner-products */
template<typename Lhs, typename Rhs, int Option>
class dense_product_base<Lhs, Rhs, Option, InnerProduct>
: public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type
diff --git a/Eigen/src/Core/Transpositions.h b/Eigen/src/Core/Transpositions.h
index 19c17bb4a..8798deca5 100644
--- a/Eigen/src/Core/Transpositions.h
+++ b/Eigen/src/Core/Transpositions.h
@@ -84,7 +84,7 @@ class TranspositionsBase
}
// FIXME: do we want such methods ?
- // might be usefull when the target matrix expression is complex, e.g.:
+ // might be useful when the target matrix expression is complex, e.g.:
// object.matrix().block(..,..,..,..) = trans * object.matrix().block(..,..,..,..);
/*
template<typename MatrixType>
diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h
index ed80da36a..ab73fcf21 100644
--- a/Eigen/src/Core/TriangularMatrix.h
+++ b/Eigen/src/Core/TriangularMatrix.h
@@ -470,7 +470,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
* \a Side==OnTheLeft (the default), or the right-inverse-multiply \a other * inverse(\c *this) if
* \a Side==OnTheRight.
*
- * Note that the template parameter \c Side can be ommitted, in which case \c Side==OnTheLeft
+ * Note that the template parameter \c Side can be omitted, in which case \c Side==OnTheLeft
*
* The matrix \c *this must be triangular and invertible (i.e., all the coefficients of the
* diagonal must be non zero). It works as a forward (resp. backward) substitution if \c *this
@@ -496,7 +496,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
* \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.
* This function will const_cast it, so constness isn't honored here.
*
- * Note that the template parameter \c Side can be ommitted, in which case \c Side==OnTheLeft
+ * Note that the template parameter \c Side can be omitted, in which case \c Side==OnTheLeft
*
* See TriangularView:solve() for the details.
*/
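
An illustrative sketch of the two Side variants documented above, assuming a dense lower-triangular system (not part of the patch):

    #include <Eigen/Dense>
    using Eigen::MatrixXd;

    int main()
    {
      MatrixXd A = MatrixXd::Random(4,4), B = MatrixXd::Random(4,2);
      // Side omitted, i.e. OnTheLeft: X1 = L^{-1} * B
      MatrixXd X1 = A.triangularView<Eigen::Lower>().solve(B);
      // Side == OnTheRight: X2 = B^T * L^{-1}
      MatrixXd X2 = A.triangularView<Eigen::Lower>().solve<Eigen::OnTheRight>(B.transpose());
      return 0;
    }
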
diff --git a/Eigen/src/Core/arch/AltiVec/PacketMath.h b/Eigen/src/Core/arch/AltiVec/PacketMath.h
index b3f1ea199..31bb896ca 100755
--- a/Eigen/src/Core/arch/AltiVec/PacketMath.h
+++ b/Eigen/src/Core/arch/AltiVec/PacketMath.h
@@ -434,7 +434,7 @@ template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
return (Packet4i) vec_perm(MSQ, LSQ, mask); // align the data
}
#else
-// We also need ot redefine little endian loading of Packet4i/Packet4f using VSX
+// We also need to redefine little endian loading of Packet4i/Packet4f using VSX
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
EIGEN_DEBUG_UNALIGNED_LOAD
@@ -500,7 +500,7 @@ template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& f
vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part
}
#else
-// We also need ot redefine little endian loading of Packet4i/Packet4f using VSX
+// We also need to redefine little endian loading of Packet4i/Packet4f using VSX
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from)
{
EIGEN_DEBUG_ALIGNED_STORE
diff --git a/Eigen/src/Core/arch/SSE/MathFunctions.h b/Eigen/src/Core/arch/SSE/MathFunctions.h
index 7b5f948e1..4af2c6cae 100644
--- a/Eigen/src/Core/arch/SSE/MathFunctions.h
+++ b/Eigen/src/Core/arch/SSE/MathFunctions.h
@@ -242,7 +242,7 @@ Packet2d pexp<Packet2d>(const Packet2d& _x)
return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);
}
-/* evaluation of 4 sines at onces, using SSE2 intrinsics.
+/* evaluation of 4 sines at once, using SSE2 intrinsics.
The code is the exact rewriting of the cephes sinf function.
Precision is excellent as long as x < 8192 (I did not bother to
diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
index 45230bce5..9072d0ff3 100644
--- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h
+++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -1523,7 +1523,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
prefetch(&blA[0]);
const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
- // The following piece of code wont work for 512 bit registers
+ // The following piece of code won't work for 512 bit registers
// Moreover, if LhsProgress==8 it assumes that there is a half packet of the same size
// as nr (which is currently 4) for the return type.
typedef typename unpacket_traits<SResPacket>::half SResPacketHalf;
@@ -1924,7 +1924,7 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Co
// const Scalar* b6 = &rhs[(j2+6)*rhsStride];
// const Scalar* b7 = &rhs[(j2+7)*rhsStride];
// Index k=0;
-// if(PacketSize==8) // TODO enbale vectorized transposition for PacketSize==4
+// if(PacketSize==8) // TODO enable vectorized transposition for PacketSize==4
// {
// for(; k<peeled_k; k+=PacketSize) {
// PacketBlock<Packet> kernel;
diff --git a/Eigen/src/Core/products/GeneralMatrixVector.h b/Eigen/src/Core/products/GeneralMatrixVector.h
index 41d8242e1..b2a71bc6f 100644
--- a/Eigen/src/Core/products/GeneralMatrixVector.h
+++ b/Eigen/src/Core/products/GeneralMatrixVector.h
@@ -201,7 +201,7 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,C
}
/* Optimized row-major matrix * vector product:
- * This algorithm processes 4 rows at onces that allows to both reduce
+ * This algorithm processes 4 rows at once that allows to both reduce
* the number of load/stores of the result by a factor 4 and to reduce
* the instruction dependency. Moreover, we know that all bands have the
* same alignment pattern.
diff --git a/Eigen/src/Core/products/Parallelizer.h b/Eigen/src/Core/products/Parallelizer.h
index c2f084c82..e4d13103b 100644
--- a/Eigen/src/Core/products/Parallelizer.h
+++ b/Eigen/src/Core/products/Parallelizer.h
@@ -117,7 +117,7 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth,
// compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), pb_max_threads);
- // if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
+ // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
// then abort multi-threading
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
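
For context, the user-facing knob behind this logic is Eigen::setNbThreads(); a hedged sketch (parallelization only kicks in when Eigen is built with OpenMP):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::setNbThreads(4);      // cap the number of threads used for parallelized products
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(1024, 1024);
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(1024, 1024);
      Eigen::MatrixXd C = A * B;   // multi-threaded only with OpenMP and when the conditions above hold
      return Eigen::nbThreads() > 0 ? 0 : 1;
    }
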
diff --git a/Eigen/src/Core/products/SelfadjointMatrixVector.h b/Eigen/src/Core/products/SelfadjointMatrixVector.h
index 3fd180e6c..67390f1d7 100644
--- a/Eigen/src/Core/products/SelfadjointMatrixVector.h
+++ b/Eigen/src/Core/products/SelfadjointMatrixVector.h
@@ -15,7 +15,7 @@ namespace Eigen {
namespace internal {
/* Optimized selfadjoint matrix * vector product:
- * This algorithm processes 2 columns at onces that allows to both reduce
+ * This algorithm processes 2 columns at once that allows to both reduce
* the number of load/stores of the result by a factor 2 and to reduce
* the instruction dependency.
*/
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index e351b7ad9..5872ade26 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -719,7 +719,7 @@ namespace Eigen {
#error EIGEN_MAX_STATIC_ALIGN_BYTES and EIGEN_DONT_ALIGN[_STATICALLY] are both defined with EIGEN_MAX_STATIC_ALIGN_BYTES!=0. Use EIGEN_MAX_STATIC_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN_STATICALLY.
#endif
-// EIGEN_DONT_ALIGN_STATICALLY and EIGEN_DONT_ALIGN are deprectated
+// EIGEN_DONT_ALIGN_STATICALLY and EIGEN_DONT_ALIGN are deprecated
// They imply EIGEN_MAX_STATIC_ALIGN_BYTES=0
#if defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)
#ifdef EIGEN_MAX_STATIC_ALIGN_BYTES
@@ -778,7 +778,7 @@ namespace Eigen {
#endif
// At this stage, EIGEN_MAX_STATIC_ALIGN_BYTES>0 is the true test whether we want to align arrays on the stack or not.
-// It takes into account both the user choice to explicitly enable/disable alignment (by settting EIGEN_MAX_STATIC_ALIGN_BYTES)
+// It takes into account both the user choice to explicitly enable/disable alignment (by setting EIGEN_MAX_STATIC_ALIGN_BYTES)
// and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT).
// Henceforth, only EIGEN_MAX_STATIC_ALIGN_BYTES should be used.
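
A minimal sketch of the non-deprecated way to disable static alignment that this comment refers to (illustrative translation unit, not from the patch):

    // Must come before any Eigen header is included.
    #define EIGEN_MAX_STATIC_ALIGN_BYTES 0   // synonym of the deprecated EIGEN_DONT_ALIGN_STATICALLY
    #include <Eigen/Dense>

    int main() { Eigen::Matrix4f m = Eigen::Matrix4f::Zero(); return int(m.sum()); }
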
diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h
index c455f92a1..006b0bfba 100644
--- a/Eigen/src/Core/util/Memory.h
+++ b/Eigen/src/Core/util/Memory.h
@@ -703,7 +703,7 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
* - 32 bytes alignment if AVX is enabled.
* - 64 bytes alignment if AVX512 is enabled.
*
-* This can be controled using the \c EIGEN_MAX_ALIGN_BYTES macro as documented
+* This can be controlled using the \c EIGEN_MAX_ALIGN_BYTES macro as documented
* \link TopicPreprocessorDirectivesPerformance there \endlink.
*
* Example:
diff --git a/Eigen/src/Core/util/Meta.h b/Eigen/src/Core/util/Meta.h
index 0fa818008..998b8921a 100755
--- a/Eigen/src/Core/util/Meta.h
+++ b/Eigen/src/Core/util/Meta.h
@@ -272,7 +272,7 @@ template<> struct numeric_limits<unsigned long long>
#endif
/** \internal
- * A base class do disable default copy ctor and copy assignement operator.
+ * A base class do disable default copy ctor and copy assignment operator.
*/
class noncopyable
{
diff --git a/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
index dc5fae06a..081e918f1 100644
--- a/Eigen/src/Eigenvalues/ComplexEigenSolver.h
+++ b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
@@ -214,7 +214,7 @@ template<typename _MatrixType> class ComplexEigenSolver
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
diff --git a/Eigen/src/Eigenvalues/ComplexSchur.h b/Eigen/src/Eigenvalues/ComplexSchur.h
index 7f38919f7..b8b3490c6 100644
--- a/Eigen/src/Eigenvalues/ComplexSchur.h
+++ b/Eigen/src/Eigenvalues/ComplexSchur.h
@@ -212,7 +212,7 @@ template<typename _MatrixType> class ComplexSchur
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
diff --git a/Eigen/src/Eigenvalues/EigenSolver.h b/Eigen/src/Eigenvalues/EigenSolver.h
index f205b185d..997bebe7b 100644
--- a/Eigen/src/Eigenvalues/EigenSolver.h
+++ b/Eigen/src/Eigenvalues/EigenSolver.h
@@ -277,7 +277,7 @@ template<typename _MatrixType> class EigenSolver
template<typename InputType>
EigenSolver& compute(const EigenBase<InputType>& matrix, bool computeEigenvectors = true);
- /** \returns NumericalIssue if the input contains INF or NaN values or overflow occured. Returns Success otherwise. */
+ /** \returns NumericalIssue if the input contains INF or NaN values or overflow occurred. Returns Success otherwise. */
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
diff --git a/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
index 5f6bb8289..d0f9091be 100644
--- a/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
+++ b/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
@@ -121,7 +121,7 @@ class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixT
*
* \returns Reference to \c *this
*
- * Accoring to \p options, this function computes eigenvalues and (if requested)
+ * According to \p options, this function computes eigenvalues and (if requested)
* the eigenvectors of one of the following three generalized eigenproblems:
* - \c Ax_lBx: \f$ Ax = \lambda B x \f$
* - \c ABx_lx: \f$ ABx = \lambda x \f$
diff --git a/Eigen/src/Eigenvalues/RealQZ.h b/Eigen/src/Eigenvalues/RealQZ.h
index b3a910dd9..e2b37f40e 100644
--- a/Eigen/src/Eigenvalues/RealQZ.h
+++ b/Eigen/src/Eigenvalues/RealQZ.h
@@ -161,7 +161,7 @@ namespace Eigen {
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
diff --git a/Eigen/src/Eigenvalues/RealSchur.h b/Eigen/src/Eigenvalues/RealSchur.h
index f5c86041d..9e71f3040 100644
--- a/Eigen/src/Eigenvalues/RealSchur.h
+++ b/Eigen/src/Eigenvalues/RealSchur.h
@@ -190,7 +190,7 @@ template<typename _MatrixType> class RealSchur
RealSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU);
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
index 9ddd553f2..040f8d3bb 100644
--- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
+++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -337,7 +337,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
EIGEN_DEVICE_FUNC
ComputationInfo info() const
diff --git a/Eigen/src/Geometry/Scaling.h b/Eigen/src/Geometry/Scaling.h
index 8d9acf252..df650fda6 100755
--- a/Eigen/src/Geometry/Scaling.h
+++ b/Eigen/src/Geometry/Scaling.h
@@ -128,7 +128,7 @@ public:
/** Concatenates a linear transformation matrix and a uniform scaling
* \relates UniformScaling
*/
-// NOTE this operator is defiend in MatrixBase and not as a friend function
+// NOTE this operator is defined in MatrixBase and not as a friend function
// of UniformScaling to fix an internal crash of Intel's ICC
template<typename Derived,typename Scalar>
EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,Scalar,product)
diff --git a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
index 338e6f10a..43bd8e8f6 100644
--- a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
+++ b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
@@ -136,7 +136,7 @@ class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar, _StorageInd
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix.appears to be negative.
*/
ComputationInfo info() const
@@ -230,7 +230,7 @@ void IncompleteLUT<Scalar,StorageIndex>::analyzePattern(const _MatrixType& amat)
SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose();
// FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
- // on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered...
+ // on the other hand for a really non-symmetric pattern, mat2*mat1 should be preferred...
SparseMatrix<Scalar,ColMajor, StorageIndex> AtA = mat2 + mat1;
AMDOrdering<StorageIndex> ordering;
ordering(AtA,m_P);
diff --git a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
index 7c2326eb7..bfeee71cd 100644
--- a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
+++ b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
@@ -275,7 +275,7 @@ public:
const Preconditioner& preconditioner() const { return m_preconditioner; }
/** \returns the max number of iterations.
- * It is either the value setted by setMaxIterations or, by default,
+ * It is either the value set by setMaxIterations or, by default,
* twice the number of columns of the matrix.
*/
Index maxIterations() const
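
An illustrative use of the iteration budget documented above, assuming a symmetric positive definite sparse system (not part of the patch):

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    Eigen::VectorXd solve_with_budget(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b)
    {
      Eigen::ConjugateGradient<Eigen::SparseMatrix<double>, Eigen::Lower|Eigen::Upper> cg;
      cg.setMaxIterations(50);   // otherwise the default is twice the number of columns
      cg.setTolerance(1e-8);
      cg.compute(A);
      Eigen::VectorXd x = cg.solve(b);
      // cg.iterations() and cg.error() report how much of the budget was actually used
      return x;
    }
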
diff --git a/Eigen/src/KLUSupport/KLUSupport.h b/Eigen/src/KLUSupport/KLUSupport.h
index a9e8633d9..d2633a935 100644
--- a/Eigen/src/KLUSupport/KLUSupport.h
+++ b/Eigen/src/KLUSupport/KLUSupport.h
@@ -106,7 +106,7 @@ class KLU : public SparseSolverBase<KLU<_MatrixType> >
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix.appears to be negative.
*/
ComputationInfo info() const
diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h
index ec61086d5..50d1bb41b 100644
--- a/Eigen/src/LU/FullPivLU.h
+++ b/Eigen/src/LU/FullPivLU.h
@@ -48,7 +48,7 @@ template<typename _MatrixType> struct traits<FullPivLU<_MatrixType> >
* The data of the LU decomposition can be directly accessed through the methods matrixLU(),
* permutationP(), permutationQ().
*
- * As an exemple, here is how the original matrix can be retrieved:
+ * As an example, here is how the original matrix can be retrieved:
* \include class_FullPivLU.cpp
* Output: \verbinclude class_FullPivLU.out
*
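
The class_FullPivLU.cpp snippet referenced here is not shown in the diff; a hedged reconstruction of the same idea for a square matrix:

    #include <Eigen/Dense>
    using Eigen::MatrixXd;

    int main()
    {
      MatrixXd A = MatrixXd::Random(5,5);
      Eigen::FullPivLU<MatrixXd> lu(A);
      MatrixXd L = MatrixXd::Identity(5,5);
      L.triangularView<Eigen::StrictlyLower>() = lu.matrixLU();   // unit-lower factor
      MatrixXd U = lu.matrixLU().triangularView<Eigen::Upper>();  // upper factor
      MatrixXd A_back = lu.permutationP().inverse() * L * U * lu.permutationQ().inverse();
      return (A - A_back).norm() < 1e-10 ? 0 : 1;                 // A == P^{-1} L U Q^{-1}
    }
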
diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h
index d43961887..bfcd2c95b 100644
--- a/Eigen/src/LU/PartialPivLU.h
+++ b/Eigen/src/LU/PartialPivLU.h
@@ -420,8 +420,8 @@ struct partial_lu_impl
* \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.
*
* \note This very low level interface using pointers, etc. is to:
- * 1 - reduce the number of instanciations to the strict minimum
- * 2 - avoid infinite recursion of the instanciations with Block<Block<Block<...> > >
+ * 1 - reduce the number of instantiations to the strict minimum
+ * 2 - avoid infinite recursion of the instantiations with Block<Block<Block<...> > >
*/
static Index blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, PivIndex* row_transpositions, PivIndex& nb_transpositions, Index maxBlockSize=256)
{
diff --git a/Eigen/src/OrderingMethods/Eigen_Colamd.h b/Eigen/src/OrderingMethods/Eigen_Colamd.h
index da85b4d6e..67fcad3f7 100644
--- a/Eigen/src/OrderingMethods/Eigen_Colamd.h
+++ b/Eigen/src/OrderingMethods/Eigen_Colamd.h
@@ -1493,7 +1493,7 @@ static inline void order_children
c = Col [c].shared1.parent ;
/* continue until we hit an ordered column. There are */
- /* guarranteed not to be anymore unordered columns */
+ /* guaranteed not to be anymore unordered columns */
/* above an ordered column */
} while (Col [c].shared2.order == COLAMD_EMPTY) ;
@@ -1638,7 +1638,7 @@ static void detect_super_cols
COLAMD_ASSERT (ROW_IS_ALIVE (*cp1)) ;
COLAMD_ASSERT (ROW_IS_ALIVE (*cp2)) ;
/* row indices will same order for both supercols, */
- /* no gather scatter nessasary */
+ /* no gather scatter necessary */
if (*cp1++ != *cp2++)
{
break ;
@@ -1688,7 +1688,7 @@ static void detect_super_cols
/*
Defragments and compacts columns and rows in the workspace A. Used when
- all avaliable memory has been used while performing row merging. Returns
+ all available memory has been used while performing row merging. Returns
the index of the first free position in A, after garbage collection. The
time taken by this routine is linear is the size of the array A, which is
itself linear in the number of nonzeros in the input matrix.
diff --git a/Eigen/src/PaStiXSupport/PaStiXSupport.h b/Eigen/src/PaStiXSupport/PaStiXSupport.h
index 160d8a523..37426877a 100644
--- a/Eigen/src/PaStiXSupport/PaStiXSupport.h
+++ b/Eigen/src/PaStiXSupport/PaStiXSupport.h
@@ -203,7 +203,7 @@ class PastixBase : public SparseSolverBase<Derived>
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the PaStiX reports a problem
* \c InvalidInput if the input matrix is invalid
*
diff --git a/Eigen/src/PardisoSupport/PardisoSupport.h b/Eigen/src/PardisoSupport/PardisoSupport.h
index 091c3970e..fb2ba04b4 100644
--- a/Eigen/src/PardisoSupport/PardisoSupport.h
+++ b/Eigen/src/PardisoSupport/PardisoSupport.h
@@ -140,7 +140,7 @@ class PardisoImpl : public SparseSolverBase<Derived>
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be negative.
*/
ComputationInfo info() const
diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h
index 5270eaca2..ed47b05e3 100644
--- a/Eigen/src/QR/ColPivHouseholderQR.h
+++ b/Eigen/src/QR/ColPivHouseholderQR.h
@@ -402,7 +402,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
*/
RealScalar maxPivot() const { return m_maxpivot; }
- /** \brief Reports whether the QR factorization was succesful.
+ /** \brief Reports whether the QR factorization was successful.
*
* \note This function always returns \c Success. It is provided for compatibility
* with other factorization routines.
diff --git a/Eigen/src/QR/CompleteOrthogonalDecomposition.h b/Eigen/src/QR/CompleteOrthogonalDecomposition.h
index 13b61fcdb..880becb25 100644
--- a/Eigen/src/QR/CompleteOrthogonalDecomposition.h
+++ b/Eigen/src/QR/CompleteOrthogonalDecomposition.h
@@ -353,7 +353,7 @@ class CompleteOrthogonalDecomposition {
inline RealScalar maxPivot() const { return m_cpqr.maxPivot(); }
/** \brief Reports whether the complete orthogonal decomposition was
- * succesful.
+ * successful.
*
* \note This function always returns \c Success. It is provided for
* compatibility
diff --git a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
index 953d57c9d..1a5c5254e 100644
--- a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
+++ b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
@@ -220,7 +220,7 @@ class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the sparse QR can not be computed
*/
ComputationInfo info() const
diff --git a/Eigen/src/SVD/BDCSVD.h b/Eigen/src/SVD/BDCSVD.h
index 06865a331..a24deb96a 100644
--- a/Eigen/src/SVD/BDCSVD.h
+++ b/Eigen/src/SVD/BDCSVD.h
@@ -62,7 +62,7 @@ struct traits<BDCSVD<_MatrixType> >
* recommended and can several order of magnitude faster.
*
* \warning this algorithm is unlikely to provide accurate result when compiled with unsafe math optimizations.
- * For instance, this concerns Intel's compiler (ICC), which perfroms such optimization by default unless
+ * For instance, this concerns Intel's compiler (ICC), which performs such optimization by default unless
* you compile with the \c -fp-model \c precise option. Likewise, the \c -ffast-math option of GCC or clang will
* significantly degrade the accuracy.
*
diff --git a/Eigen/src/SVD/UpperBidiagonalization.h b/Eigen/src/SVD/UpperBidiagonalization.h
index 11ac847e1..0526ac931 100644
--- a/Eigen/src/SVD/UpperBidiagonalization.h
+++ b/Eigen/src/SVD/UpperBidiagonalization.h
@@ -202,7 +202,7 @@ void upperbidiagonalization_blocked_helper(MatrixType& A,
{
SubColumnType y_k( Y.col(k).tail(remainingCols) );
- // let's use the begining of column k of Y as a temporary vector
+ // let's use the beginning of column k of Y as a temporary vector
SubColumnType tmp( Y.col(k).head(k) );
y_k.noalias() = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
tmp.noalias() = V_k1.adjoint() * v_k;
@@ -231,7 +231,7 @@ void upperbidiagonalization_blocked_helper(MatrixType& A,
{
SubColumnType x_k ( X.col(k).tail(remainingRows-1) );
- // let's use the begining of column k of X as a temporary vectors
+ // let's use the beginning of column k of X as a temporary vectors
// note that tmp0 and tmp1 overlaps
SubColumnType tmp0 ( X.col(k).head(k) ),
tmp1 ( X.col(k).head(k+1) );
diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky.h b/Eigen/src/SparseCholesky/SimplicialCholesky.h
index 2907f6529..b9ca94bc3 100644
--- a/Eigen/src/SparseCholesky/SimplicialCholesky.h
+++ b/Eigen/src/SparseCholesky/SimplicialCholesky.h
@@ -101,7 +101,7 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix.appears to be negative.
*/
ComputationInfo info() const
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index 323c2323b..8f77194b6 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -21,7 +21,7 @@ namespace Eigen {
* This class implements a more versatile variants of the common \em compressed row/column storage format.
* Each colmun's (resp. row) non zeros are stored as a pair of value with associated row (resp. colmiun) index.
* All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
- * space inbetween the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero
+ * space in between the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero
* can be done with limited memory reallocation and copies.
*
* A call to the function makeCompressed() turns the matrix into the standard \em compressed format
@@ -503,7 +503,7 @@ class SparseMatrix
}
}
- /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerence \a epsilon */
+ /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
prune(default_prunning_func(reference,epsilon));
@@ -986,7 +986,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
*
* \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
* an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
- * be explicitely stored into a std::vector for instance.
+ * be explicitly stored into a std::vector for instance.
*/
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
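
An illustrative sketch tying the two documented points together: triplets explicitly stored in a std::vector, then prune() with a reference value and tolerance (not part of the patch):

    #include <Eigen/Sparse>
    #include <vector>

    int main()
    {
      typedef Eigen::Triplet<double> T;
      std::vector<T> triplets;                    // stored explicitly, as the warning advises
      triplets.push_back(T(0, 0, 3.0));
      triplets.push_back(T(1, 2, 1e-12));
      triplets.push_back(T(2, 1, -2.0));
      Eigen::SparseMatrix<double> A(3, 3);
      A.setFromTriplets(triplets.begin(), triplets.end());
      A.prune(1.0, 1e-9);                         // drops entries much smaller than reference * epsilon
      return A.nonZeros() == 2 ? 0 : 1;
    }
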
diff --git a/Eigen/src/SparseCore/SparseProduct.h b/Eigen/src/SparseCore/SparseProduct.h
index 4cbf68781..c495a7398 100644
--- a/Eigen/src/SparseCore/SparseProduct.h
+++ b/Eigen/src/SparseCore/SparseProduct.h
@@ -17,7 +17,7 @@ namespace Eigen {
* The automatic pruning of the small values can be achieved by calling the pruned() function
* in which case a totally different product algorithm is employed:
* \code
- * C = (A*B).pruned(); // supress numerical zeros (exact)
+ * C = (A*B).pruned(); // suppress numerical zeros (exact)
* C = (A*B).pruned(ref);
* C = (A*B).pruned(ref,epsilon);
* \endcode
diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h
index 19b0fbc9d..05779be68 100644
--- a/Eigen/src/SparseCore/SparseVector.h
+++ b/Eigen/src/SparseCore/SparseVector.h
@@ -281,7 +281,7 @@ class SparseVector
}
/** Swaps the values of \c *this and \a other.
- * Overloaded for performance: this version performs a \em shallow swap by swaping pointers and attributes only.
+ * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
* \sa SparseMatrixBase::swap()
*/
inline void swap(SparseVector& other)
diff --git a/Eigen/src/SparseLU/SparseLU.h b/Eigen/src/SparseLU/SparseLU.h
index f883ab383..383a203b4 100644
--- a/Eigen/src/SparseLU/SparseLU.h
+++ b/Eigen/src/SparseLU/SparseLU.h
@@ -193,7 +193,7 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the LU factorization reports a problem, zero diagonal for instance
* \c InvalidInput if the input matrix is invalid
*
diff --git a/Eigen/src/SparseLU/SparseLU_Memory.h b/Eigen/src/SparseLU/SparseLU_Memory.h
index 4dc42e87b..349bfd585 100644
--- a/Eigen/src/SparseLU/SparseLU_Memory.h
+++ b/Eigen/src/SparseLU/SparseLU_Memory.h
@@ -51,7 +51,7 @@ inline Index LUTempSpace(Index&m, Index& w)
/**
- * Expand the existing storage to accomodate more fill-ins
+ * Expand the existing storage to accommodate more fill-ins
* \param vec Valid pointer to the vector to allocate or expand
* \param[in,out] length At input, contain the current length of the vector that is to be increased. At output, length of the newly allocated vector
* \param[in] nbElts Current number of elements in the factors
diff --git a/Eigen/src/SparseLU/SparseLU_column_dfs.h b/Eigen/src/SparseLU/SparseLU_column_dfs.h
index c98b30e32..5a2c941b4 100644
--- a/Eigen/src/SparseLU/SparseLU_column_dfs.h
+++ b/Eigen/src/SparseLU/SparseLU_column_dfs.h
@@ -151,7 +151,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
StorageIndex ito = glu.xlsub(fsupc+1);
glu.xlsub(jcolm1) = ito;
StorageIndex istop = ito + jptr - jm1ptr;
- xprune(jcolm1) = istop; // intialize xprune(jcol-1)
+ xprune(jcolm1) = istop; // initialize xprune(jcol-1)
glu.xlsub(jcol) = istop;
for (StorageIndex ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito)
@@ -166,7 +166,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
// Tidy up the pointers before exit
glu.xsup(nsuper+1) = jcolp1;
glu.supno(jcolp1) = nsuper;
- xprune(jcol) = StorageIndex(nextl); // Intialize upper bound for pruning
+ xprune(jcol) = StorageIndex(nextl); // Initialize upper bound for pruning
glu.xlsub(jcolp1) = StorageIndex(nextl);
return 0;
diff --git a/Eigen/src/SparseLU/SparseLU_gemm_kernel.h b/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
index 95ba7413f..e37c2fe0d 100644
--- a/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
+++ b/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
@@ -215,7 +215,7 @@ void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const
if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
pstore(C0+i+(I)*PacketSize, c0);
- // agressive vectorization and peeling
+ // aggressive vectorization and peeling
for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
{
EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");
diff --git a/Eigen/src/SparseLU/SparseLU_panel_bmod.h b/Eigen/src/SparseLU/SparseLU_panel_bmod.h
index 822cf32c3..f052001c8 100644
--- a/Eigen/src/SparseLU/SparseLU_panel_bmod.h
+++ b/Eigen/src/SparseLU/SparseLU_panel_bmod.h
@@ -38,7 +38,7 @@ namespace internal {
* \brief Performs numeric block updates (sup-panel) in topological order.
*
* Before entering this routine, the original nonzeros in the panel
- * were already copied i nto the spa[m,w]
+ * were already copied into the spa[m,w]
*
* \param m number of rows in the matrix
* \param w Panel size
diff --git a/Eigen/src/SuperLUSupport/SuperLUSupport.h b/Eigen/src/SuperLUSupport/SuperLUSupport.h
index 50a69f306..4bb95eb8b 100644
--- a/Eigen/src/SuperLUSupport/SuperLUSupport.h
+++ b/Eigen/src/SuperLUSupport/SuperLUSupport.h
@@ -352,7 +352,7 @@ class SuperLUBase : public SparseSolverBase<Derived>
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix.appears to be negative.
*/
ComputationInfo info() const
diff --git a/Eigen/src/UmfPackSupport/UmfPackSupport.h b/Eigen/src/UmfPackSupport/UmfPackSupport.h
index 9568cc1d5..c636f17ac 100644
--- a/Eigen/src/UmfPackSupport/UmfPackSupport.h
+++ b/Eigen/src/UmfPackSupport/UmfPackSupport.h
@@ -201,7 +201,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix.appears to be negative.
*/
ComputationInfo info() const
diff --git a/Eigen/src/plugins/IndexedViewMethods.h b/Eigen/src/plugins/IndexedViewMethods.h
index a7ec63adf..9ad2d9aee 100644
--- a/Eigen/src/plugins/IndexedViewMethods.h
+++ b/Eigen/src/plugins/IndexedViewMethods.h
@@ -112,7 +112,7 @@ operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_IND
#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
-// The folowing three overloads are needed to handle raw Index[N] arrays.
+// The following three overloads are needed to handle raw Index[N] arrays.
template<typename RowIndicesT, std::size_t RowIndicesN, typename ColIndices>
IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const RowIndicesT (&)[RowIndicesN],typename IvcColType<ColIndices>::type>
diff --git a/bench/analyze-blocking-sizes.cpp b/bench/analyze-blocking-sizes.cpp
index d563a1d2d..6bc4aca3d 100644
--- a/bench/analyze-blocking-sizes.cpp
+++ b/bench/analyze-blocking-sizes.cpp
@@ -825,7 +825,7 @@ int main(int argc, char* argv[])
}
for (int i = 1; i < argc; i++) {
bool arg_handled = false;
- // Step 1. Try to match action invokation names.
+ // Step 1. Try to match action invocation names.
for (auto it = available_actions.begin(); it != available_actions.end(); ++it) {
if (!strcmp(argv[i], (*it)->invokation_name())) {
if (!action) {
diff --git a/bench/btl/README b/bench/btl/README
index f3f5fb36f..ebed88960 100644
--- a/bench/btl/README
+++ b/bench/btl/README
@@ -36,7 +36,7 @@ For instance:
You can also select a given set of actions defining the environment variable BTL_CONFIG this way:
BTL_CONFIG="-a action1{:action2}*" ctest -V
-An exemple:
+An example:
BTL_CONFIG="-a axpy:vector_matrix:trisolve:ata" ctest -V -R eigen2
Finally, if bench results already exist (the bench*.dat files) then they merges by keeping the best for each matrix size. If you want to overwrite the previous ones you can simply add the "--overwrite" option:
diff --git a/bench/btl/generic_bench/bench.hh b/bench/btl/generic_bench/bench.hh
index 7b7b951b5..0732940d5 100644
--- a/bench/btl/generic_bench/bench.hh
+++ b/bench/btl/generic_bench/bench.hh
@@ -159,7 +159,7 @@ BTL_DONT_INLINE void bench( int size_min, int size_max, int nb_point ){
// bench<Mixed_Perf_Analyzer,Action>(size_min,size_max,nb_point);
- // Only for small problem size. Otherwize it will be too long
+ // Only for small problem size. Otherwise it will be too long
// bench<X86_Perf_Analyzer,Action>(size_min,size_max,nb_point);
// bench<STL_Perf_Analyzer,Action>(size_min,size_max,nb_point);
diff --git a/bench/btl/generic_bench/utils/size_log.hh b/bench/btl/generic_bench/utils/size_log.hh
index 13a3da7a8..68945e7cc 100644
--- a/bench/btl/generic_bench/utils/size_log.hh
+++ b/bench/btl/generic_bench/utils/size_log.hh
@@ -23,7 +23,7 @@
#include "math.h"
// The Vector class must satisfy the following part of STL vector concept :
// resize() method
-// [] operator for seting element
+// [] operator for setting element
// the vector element are int compatible.
template<class Vector>
void size_log(const int nb_point, const int size_min, const int size_max, Vector & X)
diff --git a/bench/btl/generic_bench/utils/xy_file.hh b/bench/btl/generic_bench/utils/xy_file.hh
index 4571bed8f..0492faf09 100644
--- a/bench/btl/generic_bench/utils/xy_file.hh
+++ b/bench/btl/generic_bench/utils/xy_file.hh
@@ -55,7 +55,7 @@ bool read_xy_file(const std::string & filename, std::vector<int> & tab_sizes,
// The Vector class must satisfy the following part of STL vector concept :
// resize() method
-// [] operator for seting element
+// [] operator for setting element
// the vector element must have the << operator define
using namespace std;
diff --git a/bench/btl/libs/ublas/ublas_interface.hh b/bench/btl/libs/ublas/ublas_interface.hh
index 95cad5195..f59b7cf2f 100644
--- a/bench/btl/libs/ublas/ublas_interface.hh
+++ b/bench/btl/libs/ublas/ublas_interface.hh
@@ -100,7 +100,7 @@ public :
Y+=coef*X;
}
- // alias free assignements
+ // alias free assignments
static inline void matrix_vector_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){
X.assign(prod(A,B));
diff --git a/bench/eig33.cpp b/bench/eig33.cpp
index 47947a9be..f003d8a53 100644
--- a/bench/eig33.cpp
+++ b/bench/eig33.cpp
@@ -101,7 +101,7 @@ void eigen33(const Matrix& mat, Matrix& evecs, Vector& evals)
computeRoots(scaledMat,evals);
// compute the eigen vectors
- // **here we assume 3 differents eigenvalues**
+ // **here we assume 3 different eigenvalues**
// "optimized version" which appears to be slower with gcc!
// Vector base;
diff --git a/bench/spbench/spbenchsolver.cpp b/bench/spbench/spbenchsolver.cpp
index 4acd0039c..2a7351124 100644
--- a/bench/spbench/spbenchsolver.cpp
+++ b/bench/spbench/spbenchsolver.cpp
@@ -54,7 +54,7 @@ int main(int argc, char ** args)
statbuf.close();
}
else
- std::cerr << "Unable to open the provided file for writting... \n";
+ std::cerr << "Unable to open the provided file for writing... \n";
}
// Get the maximum number of iterations and the tolerance
diff --git a/blas/f2c/ctbmv.c b/blas/f2c/ctbmv.c
index 790fd581f..a6e0dae80 100644
--- a/blas/f2c/ctbmv.c
+++ b/blas/f2c/ctbmv.c
@@ -147,7 +147,7 @@
/* ( 1 + ( n - 1 )*abs( INCX ) ). */
/* Before entry, the incremented array X must contain the n */
/* element vector x. On exit, X is overwritten with the */
-/* tranformed vector x. */
+/* transformed vector x. */
/* INCX - INTEGER. */
/* On entry, INCX specifies the increment for the elements of */
diff --git a/blas/f2c/dtbmv.c b/blas/f2c/dtbmv.c
index fdf73ebb5..aa67d19da 100644
--- a/blas/f2c/dtbmv.c
+++ b/blas/f2c/dtbmv.c
@@ -143,7 +143,7 @@
/* ( 1 + ( n - 1 )*abs( INCX ) ). */
/* Before entry, the incremented array X must contain the n */
/* element vector x. On exit, X is overwritten with the */
-/* tranformed vector x. */
+/* transformed vector x. */
/* INCX - INTEGER. */
/* On entry, INCX specifies the increment for the elements of */
diff --git a/blas/f2c/stbmv.c b/blas/f2c/stbmv.c
index fcf9ce336..b5a68b545 100644
--- a/blas/f2c/stbmv.c
+++ b/blas/f2c/stbmv.c
@@ -143,7 +143,7 @@
/* ( 1 + ( n - 1 )*abs( INCX ) ). */
/* Before entry, the incremented array X must contain the n */
/* element vector x. On exit, X is overwritten with the */
-/* tranformed vector x. */
+/* transformed vector x. */
/* INCX - INTEGER. */
/* On entry, INCX specifies the increment for the elements of */
diff --git a/blas/f2c/ztbmv.c b/blas/f2c/ztbmv.c
index 4cdcd7f88..3bf0beb01 100644
--- a/blas/f2c/ztbmv.c
+++ b/blas/f2c/ztbmv.c
@@ -147,7 +147,7 @@
/* ( 1 + ( n - 1 )*abs( INCX ) ). */
/* Before entry, the incremented array X must contain the n */
/* element vector x. On exit, X is overwritten with the */
-/* tranformed vector x. */
+/* transformed vector x. */
/* INCX - INTEGER. */
/* On entry, INCX specifies the increment for the elements of */
diff --git a/blas/level1_impl.h b/blas/level1_impl.h
index f857bfa20..6e7f8c976 100644
--- a/blas/level1_impl.h
+++ b/blas/level1_impl.h
@@ -33,7 +33,7 @@ int EIGEN_BLAS_FUNC(copy)(int *n, RealScalar *px, int *incx, RealScalar *py, int
Scalar* x = reinterpret_cast<Scalar*>(px);
Scalar* y = reinterpret_cast<Scalar*>(py);
- // be carefull, *incx==0 is allowed !!
+ // be careful, *incx==0 is allowed !!
if(*incx==1 && *incy==1)
make_vector(y,*n) = make_vector(x,*n);
else
diff --git a/blas/testing/cblat1.f b/blas/testing/cblat1.f
index 8ca67fb19..73015f5a9 100644
--- a/blas/testing/cblat1.f
+++ b/blas/testing/cblat1.f
@@ -619,7 +619,7 @@
SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
* ************************* STEST1 *****************************
*
-* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN
+* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
* REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
* ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
*
diff --git a/blas/testing/dblat1.f b/blas/testing/dblat1.f
index 30691f9bf..03d9f1345 100644
--- a/blas/testing/dblat1.f
+++ b/blas/testing/dblat1.f
@@ -990,7 +990,7 @@
SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
* ************************* STEST1 *****************************
*
-* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN
+* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
* REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
* ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
*
diff --git a/blas/testing/sblat1.f b/blas/testing/sblat1.f
index 6657c2693..4d43d9b48 100644
--- a/blas/testing/sblat1.f
+++ b/blas/testing/sblat1.f
@@ -946,7 +946,7 @@
SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
* ************************* STEST1 *****************************
*
-* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN
+* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
* REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
* ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
*
diff --git a/blas/testing/zblat1.f b/blas/testing/zblat1.f
index d30112c63..c00b67dc8 100644
--- a/blas/testing/zblat1.f
+++ b/blas/testing/zblat1.f
@@ -619,7 +619,7 @@
SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
* ************************* STEST1 *****************************
*
-* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN
+* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
* REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
* ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
*
diff --git a/cmake/EigenConfigureTesting.cmake b/cmake/EigenConfigureTesting.cmake
index afc24b5e9..a2a4f54b9 100644
--- a/cmake/EigenConfigureTesting.cmake
+++ b/cmake/EigenConfigureTesting.cmake
@@ -20,7 +20,7 @@ include(CTest)
set(EIGEN_TEST_BUILD_FLAGS "" CACHE STRING "Options passed to the build command of unit tests")
# Overwrite default DartConfiguration.tcl such that ctest can build our unit tests.
-# Recall that our unit tests are not in the "all" target, so we have to explicitely ask ctest to build our custom 'buildtests' target.
+# Recall that our unit tests are not in the "all" target, so we have to explicitly ask ctest to build our custom 'buildtests' target.
# At this stage, we can also add custom flags to the build tool through the user defined EIGEN_TEST_BUILD_FLAGS variable.
file(READ "${CMAKE_CURRENT_BINARY_DIR}/DartConfiguration.tcl" EIGEN_DART_CONFIG_FILE)
# try to grab the default flags
@@ -39,7 +39,7 @@ ei_init_testing()
# configure Eigen related testing options
option(EIGEN_NO_ASSERTION_CHECKING "Disable checking of assertions using exceptions" OFF)
-option(EIGEN_DEBUG_ASSERTS "Enable advanced debuging of assertions" OFF)
+option(EIGEN_DEBUG_ASSERTS "Enable advanced debugging of assertions" OFF)
if(CMAKE_COMPILER_IS_GNUCXX)
option(EIGEN_COVERAGE_TESTING "Enable/disable gcov" OFF)
diff --git a/cmake/EigenTesting.cmake b/cmake/EigenTesting.cmake
index 4a34ddef5..16d6d279f 100644
--- a/cmake/EigenTesting.cmake
+++ b/cmake/EigenTesting.cmake
@@ -247,7 +247,7 @@ endmacro(ei_add_test_internal_sycl)
#
# If EIGEN_SPLIT_LARGE_TESTS is ON, the test is split into multiple executables
# test_<testname>_<N>
-# where N runs from 1 to the greatest occurence found in the source file. Each of these
+# where N runs from 1 to the greatest occurrence found in the source file. Each of these
# executables is built passing -DEIGEN_TEST_PART_N. This allows to split large tests
# into smaller executables.
#
@@ -269,8 +269,8 @@ macro(ei_add_test testname)
file(READ "${filename}" test_source)
set(parts 0)
string(REGEX MATCHALL "CALL_SUBTEST_[0-9]+|EIGEN_TEST_PART_[0-9]+|EIGEN_SUFFIXES(;[0-9]+)+"
- occurences "${test_source}")
- string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_|EIGEN_SUFFIXES" "" suffixes "${occurences}")
+ occurrences "${test_source}")
+ string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_|EIGEN_SUFFIXES" "" suffixes "${occurrences}")
list(REMOVE_DUPLICATES suffixes)
if(EIGEN_SPLIT_LARGE_TESTS AND suffixes)
add_custom_target(${testname})
@@ -303,8 +303,8 @@ macro(ei_add_test_sycl testname)
file(READ "${filename}" test_source)
set(parts 0)
string(REGEX MATCHALL "CALL_SUBTEST_[0-9]+|EIGEN_TEST_PART_[0-9]+|EIGEN_SUFFIXES(;[0-9]+)+"
- occurences "${test_source}")
- string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_|EIGEN_SUFFIXES" "" suffixes "${occurences}")
+ occurrences "${test_source}")
+ string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_|EIGEN_SUFFIXES" "" suffixes "${occurrences}")
list(REMOVE_DUPLICATES suffixes)
if(EIGEN_SPLIT_LARGE_TESTS AND suffixes)
add_custom_target(${testname})
diff --git a/cmake/FindComputeCpp.cmake b/cmake/FindComputeCpp.cmake
index e61dedc46..29f2a5007 100644
--- a/cmake/FindComputeCpp.cmake
+++ b/cmake/FindComputeCpp.cmake
@@ -243,7 +243,7 @@ endfunction()
#######################
#
# Adds a SYCL compilation custom command associated with an existing
-# target and sets a dependancy on that new command.
+# target and sets a dependency on that new command.
#
# targetName : Name of the target to add a SYCL to.
# binaryDir : Intermediate directory to output the integration header.
diff --git a/cmake/FindEigen3.cmake b/cmake/FindEigen3.cmake
index 657440ba5..52efb4e15 100644
--- a/cmake/FindEigen3.cmake
+++ b/cmake/FindEigen3.cmake
@@ -15,7 +15,7 @@
# Eigen3::Eigen - The header-only Eigen library
#
# This module reads hints about search locations from
-# the following enviroment variables:
+# the following environment variables:
#
# EIGEN3_ROOT
# EIGEN3_ROOT_DIR
diff --git a/debug/msvc/eigen_autoexp_part.dat b/debug/msvc/eigen_autoexp_part.dat
index 07aa43739..35ef5807c 100644
--- a/debug/msvc/eigen_autoexp_part.dat
+++ b/debug/msvc/eigen_autoexp_part.dat
@@ -14,7 +14,7 @@
; * - Eigen::Matrix<*,-1,+,*,*,*>
; * - Eigen::Matrix<*,+,+,*,*,*>
; *
-; * Matrices are displayed properly independantly of the memory
+; * Matrices are displayed properly independently of the memory
; * alignment (RowMajor vs. ColMajor).
; *
; * This file is distributed WITHOUT ANY WARRANTY. Please ensure
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 2109978fe..49b9fba39 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -1764,7 +1764,7 @@ UML_LOOK = YES
# the class node. If there are many fields or methods and many nodes the
# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
# threshold limits the number of items for each type to make the size more
-# managable. Set this to 0 for no limit. Note that the threshold may be
+# manageable. Set this to 0 for no limit. Note that the threshold may be
# exceeded by 50% before the limit is enforced.
UML_LIMIT_NUM_FIELDS = 10
diff --git a/doc/FunctionsTakingEigenTypes.dox b/doc/FunctionsTakingEigenTypes.dox
index 152dda47d..e054714f9 100644
--- a/doc/FunctionsTakingEigenTypes.dox
+++ b/doc/FunctionsTakingEigenTypes.dox
@@ -133,7 +133,7 @@ In this special case, the example is fine and will be working because both param
\section TopicPlainFunctionsFailing In which cases do functions taking a plain Matrix or Array argument fail?
-Here, we consider a slightly modified version of the function given above. This time, we do not want to return the result but pass an additional non-const paramter which allows us to store the result. A first naive implementation might look as follows.
+Here, we consider a slightly modified version of the function given above. This time, we do not want to return the result but pass an additional non-const parameter which allows us to store the result. A first naive implementation might look as follows.
\code
// Note: This code is flawed!
void cov(const MatrixXf& x, const MatrixXf& y, MatrixXf& C)
@@ -176,7 +176,7 @@ The implementation above does now not only work with temporary expressions but i
\section TopicResizingInGenericImplementations How to resize matrices in generic implementations?
-One might think we are done now, right? This is not completely true because in order for our covariance function to be generically applicable, we want the follwing code to work
+One might think we are done now, right? This is not completely true because in order for our covariance function to be generically applicable, we want the following code to work
\code
MatrixXf x = MatrixXf::Random(100,3);
MatrixXf y = MatrixXf::Random(100,3);
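
For reference on the resizing point made in this hunk, here is a self-contained sketch of one way such a generic cov() can be written so that it accepts plain matrices or expressions and resizes its output itself. The const_cast-through-MatrixBase pattern and the names used are illustrative, not quoted verbatim from the patched page.

    #include <Eigen/Dense>
    #include <iostream>

    template <typename DerivedX, typename DerivedC>
    void cov(const Eigen::MatrixBase<DerivedX>& x,
             const Eigen::MatrixBase<DerivedX>& y,
             const Eigen::MatrixBase<DerivedC>& C_)
    {
      typedef typename DerivedX::Scalar Scalar;
      typedef Eigen::Matrix<Scalar, 1, DerivedX::ColsAtCompileTime> RowVec;

      const Scalar n = static_cast<Scalar>(x.rows());
      const RowVec x_mean = x.colwise().mean();
      const RowVec y_mean = y.colwise().mean();

      // Write access to an argument passed as "const MatrixBase&": cast the
      // constness away and resize through the derived object.
      Eigen::MatrixBase<DerivedC>& C = const_cast<Eigen::MatrixBase<DerivedC>&>(C_);
      C.derived().resize(x.cols(), x.cols());
      C = (x.rowwise() - x_mean).transpose() * (y.rowwise() - y_mean) / n;
    }

    int main()
    {
      Eigen::MatrixXf x = Eigen::MatrixXf::Random(100, 3);
      Eigen::MatrixXf y = Eigen::MatrixXf::Random(100, 3);
      Eigen::MatrixXf C;        // empty; cov() resizes it to 3x3
      cov(x, y, C);
      std::cout << C << std::endl;
      return 0;
    }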
diff --git a/doc/PreprocessorDirectives.dox b/doc/PreprocessorDirectives.dox
index b6d08c700..b49f7d3cf 100644
--- a/doc/PreprocessorDirectives.dox
+++ b/doc/PreprocessorDirectives.dox
@@ -51,7 +51,7 @@ are doing.
\section TopicPreprocessorDirectivesCppVersion C++ standard features
-By default, %Eigen strive to automatically detect and enable langage features at compile-time based on
+By default, %Eigen strives to automatically detect and enable language features at compile-time based on
the information provided by the compiler.
- \b EIGEN_MAX_CPP_VER - disables usage of C++ features requiring a version greater than EIGEN_MAX_CPP_VER.
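
For instance, a translation unit can pin the detected standard as sketched below; the value 11 is only an example.

    // Sketch: cap the C++ features Eigen is allowed to use in this translation unit.
    #define EIGEN_MAX_CPP_VER 11   // ignore anything requiring C++14 or later
    #include <Eigen/Dense>

    int main()
    {
      Eigen::Matrix2d m = Eigen::Matrix2d::Identity();
      return m(0, 0) == 1.0 ? 0 : 1;
    }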
diff --git a/doc/QuickStartGuide.dox b/doc/QuickStartGuide.dox
index ea32c3b3d..23bb2981b 100644
--- a/doc/QuickStartGuide.dox
+++ b/doc/QuickStartGuide.dox
@@ -68,7 +68,7 @@ The output is as follows:
The second example starts by declaring a 3-by-3 matrix \c m which is initialized using the \link DenseBase::Random(Index,Index) Random() \endlink method with random values between -1 and 1. The next line applies a linear mapping such that the values are between 10 and 110. The function call \link DenseBase::Constant(Index,Index,const Scalar&) MatrixXd::Constant\endlink(3,3,1.2) returns a 3-by-3 matrix expression having all coefficients equal to 1.2. The rest is standard arithmetics.
-The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left unitialized. The one but last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows:
+The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left uninitialized. The second-to-last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows:
\f[
v =
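
A compilable micro-example of the VectorXd declaration plus comma-initializer described in that paragraph (the values are chosen arbitrarily here):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::VectorXd v(3);   // three coefficients, left uninitialized
      v << 1, 2, 3;           // comma-initializer fills them in order
      std::cout << v << std::endl;
      return 0;
    }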
diff --git a/doc/SparseQuickReference.dox b/doc/SparseQuickReference.dox
index a25622e80..81a73eec2 100644
--- a/doc/SparseQuickReference.dox
+++ b/doc/SparseQuickReference.dox
@@ -80,7 +80,7 @@ sm1.setZero();
\section SparseBasicInfos Matrix properties
-Beyond the basic functions rows() and cols(), there are some useful functions that are available to easily get some informations from the matrix.
+Beyond the basic functions rows() and cols(), there are some useful functions that are available to easily get some information from the matrix.
<table class="manual">
<tr>
<td> \code
diff --git a/doc/TemplateKeyword.dox b/doc/TemplateKeyword.dox
index b84cfdae9..fbf2c7081 100644
--- a/doc/TemplateKeyword.dox
+++ b/doc/TemplateKeyword.dox
@@ -76,7 +76,7 @@ point where the template is defined, without knowing the actual value of the tem
and \c Derived2 in the example). That means that the compiler cannot know that <tt>dst.triangularView</tt> is
a member template and that the following &lt; symbol is part of the delimiter for the template
parameter. Another possibility would be that <tt>dst.triangularView</tt> is a member variable with the &lt;
-symbol refering to the <tt>operator&lt;()</tt> function. In fact, the compiler should choose the second
+symbol referring to the <tt>operator&lt;()</tt> function. In fact, the compiler should choose the second
possibility, according to the standard. If <tt>dst.triangularView</tt> is a member template (as in our case),
the programmer should specify this explicitly with the \c template keyword and write <tt>dst.template
triangularView</tt>.
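
To make the <tt>dst.template triangularView</tt> point concrete, here is a small self-contained sketch; the function name copyUpperTriangularPart is illustrative.

    #include <Eigen/Dense>

    // Inside a template, "template" tells the compiler that triangularView is a
    // member template, so the following '<' opens its template argument list.
    template <typename Derived1, typename Derived2>
    void copyUpperTriangularPart(Eigen::MatrixBase<Derived1>& dst,
                                 const Eigen::MatrixBase<Derived2>& src)
    {
      dst.template triangularView<Eigen::Upper>() = src;
    }

    int main()
    {
      Eigen::Matrix4f a = Eigen::Matrix4f::Random();
      Eigen::Matrix4f b = Eigen::Matrix4f::Zero();
      copyUpperTriangularPart(b, a);   // only the upper triangle of b is written
      return 0;
    }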
diff --git a/doc/TopicLazyEvaluation.dox b/doc/TopicLazyEvaluation.dox
index 101ef8c72..b7820e3e6 100644
--- a/doc/TopicLazyEvaluation.dox
+++ b/doc/TopicLazyEvaluation.dox
@@ -58,7 +58,7 @@ the product <tt>matrix3 * matrix4</tt> gets evaluated immediately into a tempora
\code matrix1 = matrix2 * (matrix3 + matrix4); \endcode
-Here, provided the matrices have at least 2 rows and 2 columns, each coefficienct of the expression <tt>matrix3 + matrix4</tt> is going to be used several times in the matrix product. Instead of computing the sum everytime, it is much better to compute it once and store it in a temporary variable. Eigen understands this and evaluates <tt>matrix3 + matrix4</tt> into a temporary variable before evaluating the product.
+Here, provided the matrices have at least 2 rows and 2 columns, each coefficient of the expression <tt>matrix3 + matrix4</tt> is going to be used several times in the matrix product. Instead of computing the sum every time, it is much better to compute it once and store it in a temporary variable. Eigen understands this and evaluates <tt>matrix3 + matrix4</tt> into a temporary variable before evaluating the product.
*/
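
A short illustration of the behaviour described in this hunk; calling .eval() merely makes explicit the temporary Eigen would create anyway.

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXf m1(2, 2);
      Eigen::MatrixXf m2 = Eigen::MatrixXf::Random(2, 2);
      Eigen::MatrixXf m3 = Eigen::MatrixXf::Random(2, 2);
      Eigen::MatrixXf m4 = Eigen::MatrixXf::Random(2, 2);

      // The sum is evaluated once into a temporary, then reused by the product.
      m1 = m2 * (m3 + m4);

      // Spelling the temporary out explicitly is equivalent:
      m1 = m2 * (m3 + m4).eval();
      return 0;
    }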
diff --git a/doc/TopicLinearAlgebraDecompositions.dox b/doc/TopicLinearAlgebraDecompositions.dox
index 491470627..991f964cc 100644
--- a/doc/TopicLinearAlgebraDecompositions.dox
+++ b/doc/TopicLinearAlgebraDecompositions.dox
@@ -248,7 +248,7 @@ To get an overview of the true relative speed of the different decomposition, ch
<dt><b>Blocking</b></dt>
<dd>Means the algorithm can work per block, whence guaranteeing a good scaling of the performance for large matrices.</dd>
<dt><b>Implicit Multi Threading (MT)</b></dt>
- <dd>Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algortihm itself is not parallelized, but that it relies on parallelized matrix-matrix product rountines.</dd>
+ <dd>Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algorithm itself is not parallelized, but that it relies on parallelized matrix-matrix product routines.</dd>
<dt><b>Explicit Multi Threading (MT)</b></dt>
<dd>Means the algorithm is explicitly parallelized to take advantage of multicore processors via OpenMP.</dd>
<dt><b>Meta-unroller</b></dt>
diff --git a/doc/TopicMultithreading.dox b/doc/TopicMultithreading.dox
index 47c9b261f..bc394f484 100644
--- a/doc/TopicMultithreading.dox
+++ b/doc/TopicMultithreading.dox
@@ -47,7 +47,7 @@ int main(int argc, char** argv)
\warning note that all functions generating random matrices are \b not re-entrant nor thread-safe. Those include DenseBase::Random(), and DenseBase::setRandom() despite a call to Eigen::initParallel(). This is because these functions are based on std::rand which is not re-entrant. For thread-safe random generator, we recommend the use of boost::random or c++11 random feature.
-In the case your application is parallelized with OpenMP, you might want to disable Eigen's own parallization as detailed in the previous section.
+In the case your application is parallelized with OpenMP, you might want to disable Eigen's own parallelization as detailed in the previous section.
*/
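
Two common ways to do that are sketched below; both the compile-time define and the runtime call are standard Eigen switches, though whether you need one or both depends on your build.

    // Sketch: disabling Eigen's own OpenMP parallelization in an application
    // that already manages its threads with OpenMP.
    #define EIGEN_DONT_PARALLELIZE    // compile-time switch; before any Eigen header
    #include <Eigen/Dense>

    int main()
    {
      Eigen::setNbThreads(1);         // runtime alternative: limit Eigen to one thread
      Eigen::MatrixXd a = Eigen::MatrixXd::Random(256, 256);
      Eigen::MatrixXd b = Eigen::MatrixXd::Random(256, 256);
      Eigen::MatrixXd c = a * b;      // product now runs sequentially
      return c.size() > 0 ? 0 : 1;
    }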
diff --git a/doc/TutorialMapClass.dox b/doc/TutorialMapClass.dox
index f8fb0fd2f..caa2539d8 100644
--- a/doc/TutorialMapClass.dox
+++ b/doc/TutorialMapClass.dox
@@ -29,9 +29,9 @@ Map<const Vector4i> mi(pi);
\endcode
where \c pi is an \c int \c *. In this case the size does not have to be passed to the constructor, because it is already specified by the Matrix/Array type.
-Note that Map does not have a default constructor; you \em must pass a pointer to intialize the object. However, you can work around this requirement (see \ref TutorialMapPlacementNew).
+Note that Map does not have a default constructor; you \em must pass a pointer to initialize the object. However, you can work around this requirement (see \ref TutorialMapPlacementNew).
-Map is flexible enough to accomodate a variety of different data representations. There are two other (optional) template parameters:
+Map is flexible enough to accommodate a variety of different data representations. There are two other (optional) template parameters:
\code
Map<typename MatrixType,
int MapOptions,
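
A compilable sketch of the two points made here: the size comes from the Map's type, and placement new is the usual workaround for the missing default constructor (the buffer names are illustrative).

    #include <Eigen/Dense>
    #include <iostream>
    #include <new>   // placement new

    int main()
    {
      int data[4]  = {1, 2, 3, 4};
      int other[4] = {5, 6, 7, 8};

      Eigen::Map<const Eigen::Vector4i> mi(data);   // size fixed by the type
      std::cout << mi.sum() << std::endl;           // 10

      // No default constructor, so re-seat an existing Map with placement new.
      new (&mi) Eigen::Map<const Eigen::Vector4i>(other);
      std::cout << mi(0) << std::endl;              // 5
      return 0;
    }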
diff --git a/doc/TutorialSparse.dox b/doc/TutorialSparse.dox
index 352907408..350ea1139 100644
--- a/doc/TutorialSparse.dox
+++ b/doc/TutorialSparse.dox
@@ -57,7 +57,7 @@ The \c "_" indicates available free space to quickly insert new elements.
Assuming no reallocation is needed, the insertion of a random element is therefore in O(nnz_j) where nnz_j is the number of nonzeros of the respective inner vector.
On the other hand, inserting elements with increasing inner indices in a given inner vector is much more efficient since this only requires to increase the respective \c InnerNNZs entry that is a O(1) operation.
-The case where no empty space is available is a special case, and is refered as the \em compressed mode.
+The case where no empty space is available is a special case, and is referred to as the \em compressed mode.
It corresponds to the widely used Compressed Column (or Row) Storage schemes (CCS or CRS).
Any SparseMatrix can be turned to this form by calling the SparseMatrix::makeCompressed() function.
In this case, one can remark that the \c InnerNNZs array is redundant with \c OuterStarts because we the equality: \c InnerNNZs[j] = \c OuterStarts[j+1]-\c OuterStarts[j].
@@ -212,7 +212,7 @@ See the SparseMatrix::setFromTriplets() function and class Triplet for more deta
In some cases, however, slightly higher performance, and lower memory consumption can be reached by directly inserting the non-zeros into the destination matrix.
-A typical scenario of this approach is illustrated bellow:
+A typical scenario of this approach is illustrated below:
\code
1: SparseMatrix<double> mat(rows,cols); // default is column major
2: mat.reserve(VectorXi::Constant(cols,6));
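
Continuing the snippet shown in the hunk, a self-contained version of the direct-insertion pattern; the sizes and the per-column estimate of 6 nonzeros are arbitrary.

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      const int rows = 8, cols = 8;
      Eigen::SparseMatrix<double> mat(rows, cols);       // column major by default
      mat.reserve(Eigen::VectorXi::Constant(cols, 6));   // room for ~6 nonzeros per column
      for (int j = 0; j < cols; ++j)
        mat.insert(j, j) = 1.0;                          // direct insertion
      mat.makeCompressed();                              // switch to compressed (CCS) storage
      std::cout << "nonzeros: " << mat.nonZeros() << std::endl;
      return 0;
    }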
diff --git a/doc/UnalignedArrayAssert.dox b/doc/UnalignedArrayAssert.dox
index 0f7022973..8676faa1b 100644
--- a/doc/UnalignedArrayAssert.dox
+++ b/doc/UnalignedArrayAssert.dox
@@ -117,8 +117,8 @@ It doesn't disable 16-byte alignment, because that would mean that vectorized an
\section checkmycode How can I check my code is safe regarding alignment issues?
-Unfortunately, there is no possibility in C++ to detect any of the aformentioned shortcoming at compile time (though static analysers are becoming more and more powerful and could detect some of them).
-Even at runtime, all we can do is to catch invalid unaligned allocation and trigger the explicit assertion mentioned at the begining of this page.
+Unfortunately, there is no possibility in C++ to detect any of the aforementioned shortcomings at compile time (though static analysers are becoming more and more powerful and could detect some of them).
+Even at runtime, all we can do is to catch invalid unaligned allocation and trigger the explicit assertion mentioned at the beginning of this page.
Therefore, if your program runs fine on a given system with some given compilation flags, then this does not guarantee that your code is safe. For instance, on most 64 bits systems buffer are aligned on 16 bytes boundary and so, if you do not enable AVX instruction set, then your code will run fine. On the other hand, the same code may assert if moving to a more exotic platform, or enabling AVX instructions that required 32 bytes alignment by default.
The situation is not hopeless though. Assuming your code is well covered by unit test, then you can check its alignment safety by linking it to a custom malloc library returning 8 bytes aligned buffers only. This way all alignment shortcomings should pop-up. To this end, you must also compile your program with \link TopicPreprocessorDirectivesPerformance EIGEN_MALLOC_ALREADY_ALIGNED=0 \endlink.
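
A related sketch of the kind of code this page's assertion guards against; the fix shown, EIGEN_MAKE_ALIGNED_OPERATOR_NEW, is the standard one for dynamically allocated structures holding fixed-size vectorizable members, and the struct name is hypothetical.

    #include <Eigen/Dense>

    // A dynamically allocated struct holding a fixed-size vectorizable member
    // needs an aligned operator new, otherwise the unaligned-array assertion
    // discussed on this page can fire at runtime.
    struct Foo
    {
      EIGEN_MAKE_ALIGNED_OPERATOR_NEW
      Eigen::Vector4f v;
    };

    int main()
    {
      Foo* f = new Foo;   // allocated with the alignment Vector4f requires
      f->v.setZero();
      delete f;
      return 0;
    }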
diff --git a/doc/UsingNVCC.dox b/doc/UsingNVCC.dox
index 9bcdf0bfc..36beb2ddd 100644
--- a/doc/UsingNVCC.dox
+++ b/doc/UsingNVCC.dox
@@ -5,7 +5,7 @@ namespace Eigen {
Staring from CUDA 5.5 and Eigen 3.3, it is possible to use Eigen's matrices, vectors, and arrays for fixed size within CUDA kernels. This is especially useful when working on numerous but small problems. By default, when Eigen's headers are included within a .cu file compiled by nvcc most Eigen's functions and methods are prefixed by the \c __device__ \c __host__ keywords making them callable from both host and device code.
This support can be disabled by defining \c EIGEN_NO_CUDA before including any Eigen's header.
-This might be usefull to disable some warnings when a .cu file makes use of Eigen on the host side only.
+This might be useful to disable some warnings when a .cu file makes use of Eigen on the host side only.
However, in both cases, host's SIMD vectorization has to be disabled in .cu files.
It is thus \b strongly \b recommended to properly move all costly host computation from your .cu files to regular .cpp files.
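
A minimal sketch of the host-only usage mentioned here; the file name and helper function are hypothetical.

    // host_only_helpers.cu -- sketch: keep Eigen host-only in a .cu file
    #define EIGEN_NO_CUDA      // must come before any Eigen header
    #include <Eigen/Dense>

    float traceOfIdentity3()   // plain host code; no __device__ version is generated
    {
      Eigen::Matrix3f m = Eigen::Matrix3f::Identity();
      return m.trace();
    }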
diff --git a/doc/eigendoxy.css b/doc/eigendoxy.css
index 6ce2b839b..b99d7914a 100644
--- a/doc/eigendoxy.css
+++ b/doc/eigendoxy.css
@@ -93,7 +93,7 @@ table th.inter {
border-color: #cccccc;
}
-/** class for exemple / output tables **/
+/** class for example / output tables **/
table.example {
}
diff --git a/doc/special_examples/Tutorial_sparse_example.cpp b/doc/special_examples/Tutorial_sparse_example.cpp
index 830e196ea..89937b411 100644
--- a/doc/special_examples/Tutorial_sparse_example.cpp
+++ b/doc/special_examples/Tutorial_sparse_example.cpp
@@ -12,7 +12,7 @@ int main(int argc, char** argv)
assert(argc==2);
int n = 300; // size of the image
- int m = n*n; // number of unknows (=number of pixels)
+ int m = n*n; // number of unknowns (=number of pixels)
// Assembly:
std::vector<T> coefficients; // list of non-zeros coefficients
diff --git a/lapack/CMakeLists.txt b/lapack/CMakeLists.txt
index 9883d4c72..52f18edfc 100644
--- a/lapack/CMakeLists.txt
+++ b/lapack/CMakeLists.txt
@@ -35,7 +35,7 @@ set(EigenLapack_SRCS ${EigenLapack_SRCS}
second_NONE.f dsecnd_NONE.f
)
-option(EIGEN_ENABLE_LAPACK_TESTS OFF "Enbale the Lapack unit tests")
+option(EIGEN_ENABLE_LAPACK_TESTS "Enable the Lapack unit tests" OFF)
if(EIGEN_ENABLE_LAPACK_TESTS)
@@ -59,7 +59,7 @@ if(EIGEN_ENABLE_LAPACK_TESTS)
message(STATUS "Setup lapack reference and lapack unit tests")
execute_process(COMMAND tar xzf "lapack_addons_3.4.1.tgz" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
else()
- message(STATUS "Download of lapack_addons_3.4.1.tgz failed, LAPACK unit tests wont be enabled")
+ message(STATUS "Download of lapack_addons_3.4.1.tgz failed, LAPACK unit tests won't be enabled")
set(EIGEN_ENABLE_LAPACK_TESTS false)
endif()
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 8bd086ce3..8bcf3f7c5 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -1,5 +1,5 @@
# generate split test header file only if it does not yet exist
-# in order to prevent a rebuild everytime cmake is configured
+# in order to prevent a rebuild every time cmake is configured
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
foreach(i RANGE 1 999)
diff --git a/test/bdcsvd.cpp b/test/bdcsvd.cpp
index 6c7b09696..109218766 100644
--- a/test/bdcsvd.cpp
+++ b/test/bdcsvd.cpp
@@ -104,7 +104,7 @@ void test_bdcsvd()
CALL_SUBTEST_7( BDCSVD<MatrixXf>(10,10) );
// Check that preallocation avoids subsequent mallocs
- // Disbaled because not supported by BDCSVD
+ // Disabled because not supported by BDCSVD
// CALL_SUBTEST_9( svd_preallocate<void>() );
CALL_SUBTEST_2( svd_underoverflow<void>() );
diff --git a/test/eigensolver_complex.cpp b/test/eigensolver_complex.cpp
index 293b1b265..03d5774ef 100644
--- a/test/eigensolver_complex.cpp
+++ b/test/eigensolver_complex.cpp
@@ -47,7 +47,7 @@ template<typename MatrixType> bool find_pivot(typename MatrixType::Scalar tol, M
return false;
}
-/* Check that two column vectors are approximately equal upto permutations.
+/* Check that two column vectors are approximately equal up to permutations.
* Initially, this method checked that the k-th power sums are equal for all k = 1, ..., vec1.rows(),
* however this strategy is numerically inacurate because of numerical cancellation issues.
*/
diff --git a/test/geo_quaternion.cpp b/test/geo_quaternion.cpp
index 8ee8fdb27..5854d39c5 100644
--- a/test/geo_quaternion.cpp
+++ b/test/geo_quaternion.cpp
@@ -241,7 +241,7 @@ template<typename Scalar> void mapQuaternion(void){
const MQuaternionUA& cmq3(mq3);
VERIFY( &cmq3.x() == &mq3.x() );
// FIXME the following should be ok. The problem is that currently the LValueBit flag
- // is used to determine wether we can return a coeff by reference or not, which is not enough for Map<const ...>.
+ // is used to determine whether we can return a coeff by reference or not, which is not enough for Map<const ...>.
//const MCQuaternionUA& cmcq3(mcq3);
//VERIFY( &cmcq3.x() == &mcq3.x() );
}
diff --git a/test/main.h b/test/main.h
index 6079cbd06..14f4e3e7a 100644
--- a/test/main.h
+++ b/test/main.h
@@ -183,7 +183,7 @@ namespace Eigen
};
}
// If EIGEN_DEBUG_ASSERTS is defined and if no assertion is triggered while
- // one should have been, then the list of excecuted assertions is printed out.
+ // one should have been, then the list of executed assertions is printed out.
//
// EIGEN_DEBUG_ASSERTS is not enabled by default as it
// significantly increases the compilation time
diff --git a/test/packetmath.cpp b/test/packetmath.cpp
index 08b360340..3c11df7e8 100644
--- a/test/packetmath.cpp
+++ b/test/packetmath.cpp
@@ -28,7 +28,7 @@ template<typename T> T negate(const T& x) { return -x; }
}
}
-// NOTE: we disbale inlining for this function to workaround a GCC issue when using -O3 and the i387 FPU.
+// NOTE: we disable inlining for this function to work around a GCC issue when using -O3 and the i387 FPU.
template<typename Scalar> EIGEN_DONT_INLINE
bool isApproxAbs(const Scalar& a, const Scalar& b, const typename NumTraits<Scalar>::Real& refvalue)
{