author    Gael Guennebaud <g.gael@free.fr>  2018-06-07 14:43:02 +0200
committer Gael Guennebaud <g.gael@free.fr>  2018-06-07 14:43:02 +0200
commit    b3fd93207bea08fc459be46f84fd6bbe18b19523 (patch)
tree      a4607c9e0c086ada062606cd04c54ed6255b412a
parent    405859f18dac56f324e1d93ca8721d5f7fd22c62 (diff)
Fix typos found using codespell
-rw-r--r--  Eigen/src/Core/Solve.h | 2
-rw-r--r--  Eigen/src/Core/products/Parallelizer.h | 2
-rw-r--r--  Eigen/src/IterativeLinearSolvers/SolveWithGuess.h | 2
-rw-r--r--  Eigen/src/SparseCholesky/SimplicialCholesky_impl.h | 2
-rw-r--r--  test/indexed_view.cpp | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/README.md | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/Tensor.h | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorBase.h | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h | 4
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h | 12
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h | 4
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h | 4
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorRef.h | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclFunctors.h | 8
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h | 4
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h | 8
-rw-r--r--  unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h | 2
-rw-r--r--  unsupported/Eigen/CXX11/src/util/EmulateArray.h | 2
-rw-r--r--  unsupported/Eigen/NonLinearOptimization | 4
-rw-r--r--  unsupported/Eigen/OpenGLSupport | 2
-rw-r--r--  unsupported/Eigen/src/BVH/KdBVH.h | 2
-rw-r--r--  unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h | 2
-rw-r--r--  unsupported/Eigen/src/EulerAngles/EulerSystem.h | 2
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h | 2
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/DGMRES.h | 4
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h | 2
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h | 4
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h | 2
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixPower.h | 2
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/qrsolv.h | 2
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/r1updt.h | 2
-rw-r--r--  unsupported/Eigen/src/Polynomials/Companion.h | 4
-rw-r--r--  unsupported/Eigen/src/Skyline/SkylineInplaceLU.h | 2
-rw-r--r--  unsupported/Eigen/src/Skyline/SkylineMatrix.h | 16
-rw-r--r--  unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h | 8
-rw-r--r--  unsupported/Eigen/src/SparseExtra/MarketIO.h | 2
-rw-r--r--  unsupported/Eigen/src/Splines/SplineFitting.h | 2
-rw-r--r--  unsupported/README.txt | 2
-rw-r--r--  unsupported/bench/bench_svd.cpp | 2
-rw-r--r--  unsupported/test/CMakeLists.txt | 2
-rw-r--r--  unsupported/test/autodiff_scalar.cpp | 2
-rw-r--r--  unsupported/test/cxx11_tensor_inflation_sycl.cpp | 4
-rw-r--r--  unsupported/test/cxx11_tensor_of_float16_cuda.cu | 2
-rw-r--r--  unsupported/test/cxx11_tensor_random_cuda.cu | 2
-rw-r--r--  unsupported/test/forward_adolc.cpp | 2
-rw-r--r--  unsupported/test/sparse_extra.cpp | 2
54 files changed, 84 insertions, 84 deletions
diff --git a/Eigen/src/Core/Solve.h b/Eigen/src/Core/Solve.h
index a8daea511..2bf940a26 100644
--- a/Eigen/src/Core/Solve.h
+++ b/Eigen/src/Core/Solve.h
@@ -181,7 +181,7 @@ struct Assignment<DstXprType, Solve<CwiseUnaryOp<internal::scalar_conjugate_op<t
}
};
-} // end namepsace internal
+} // end namespace internal
} // end namespace Eigen
diff --git a/Eigen/src/Core/products/Parallelizer.h b/Eigen/src/Core/products/Parallelizer.h
index e4d13103b..15b5c5f94 100644
--- a/Eigen/src/Core/products/Parallelizer.h
+++ b/Eigen/src/Core/products/Parallelizer.h
@@ -91,7 +91,7 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth,
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
- // parallelizer mechanism has to be redisigned anyway.
+ // parallelizer mechanism has to be redesigned anyway.
EIGEN_UNUSED_VARIABLE(depth);
EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols);
diff --git a/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h b/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
index 0ace45177..79e1e4819 100644
--- a/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
+++ b/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
@@ -108,7 +108,7 @@ struct Assignment<DstXprType, SolveWithGuess<DecType,RhsType,GuessType>, interna
}
};
-} // end namepsace internal
+} // end namespace internal
} // end namespace Eigen
diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
index 84a1bf2bd..0aa92f8bc 100644
--- a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
+++ b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
@@ -5,7 +5,7 @@
/*
-NOTE: thes functions vave been adapted from the LDL library:
+NOTE: these functions have been adapted from the LDL library:
LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
diff --git a/test/indexed_view.cpp b/test/indexed_view.cpp
index 033d8833f..2d46ffd6b 100644
--- a/test/indexed_view.cpp
+++ b/test/indexed_view.cpp
@@ -140,7 +140,7 @@ void check_indexed_view()
"500 501 502 503 504 505 506 507 508 509")
);
- // takes the row numer 3, and repeat it 5 times
+ // take row number 3, and repeat it 5 times
VERIFY( MATCH( A(seqN(3,5,0), all),
"300 301 302 303 304 305 306 307 308 309\n"
"300 301 302 303 304 305 306 307 308 309\n"
diff --git a/unsupported/Eigen/CXX11/src/Tensor/README.md b/unsupported/Eigen/CXX11/src/Tensor/README.md
index 49cc33c5d..dfd7ab7c7 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/README.md
+++ b/unsupported/Eigen/CXX11/src/Tensor/README.md
@@ -581,7 +581,7 @@ is not initialized.
Creates a tensor mapping an existing array of data. The data must not be freed
until the TensorMap is discarded, and the size of the data must be large enough
-to accomodate of the coefficients of the tensor.
+to accommodate the coefficients of the tensor.
float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
Eigen::TensorMap<Tensor<float, 2>> a(data, 3, 4);
diff --git a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
index 1940a9692..4fd96448f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
@@ -48,7 +48,7 @@ namespace Eigen {
*
* <dl>
* <dt><b>Relation to other parts of Eigen:</b></dt>
- * <dd>The midterm developement goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that
+ * <dd>The midterm development goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that
* taking blocks or using tensors in expressions is easily possible, including an interface with the vector/matrix code
* by providing .asMatrix() and .asVector() (or similar) methods for rank 2 and 1 tensors. However, currently, the %Tensor
* class does not provide any of these features and is only available as a stand-alone class that just allows for
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
index a942c98dd..d88e0df71 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
@@ -20,7 +20,7 @@ namespace Eigen {
* \brief The tensor base class.
*
* This class is the common parent of the Tensor and TensorMap class, thus
- * making it possible to use either class interchangably in expressions.
+ * making it possible to use either class interchangeably in expressions.
*/
template<typename Derived>
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
index d34f9caee..639d99f9d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
@@ -75,7 +75,7 @@ class TensorXsmmContractionBlocking {
outer_n_ = outer_n_ != 0 ? outer_n_ : n;
}
#else
- // Defaults, possibly overriden per-platform.
+ // Defaults, possibly overridden per-platform.
copyA_ = true;
copyB_ = false;
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index d30cc96ab..3c007b183 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -350,7 +350,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
// Normal number of notifications for k slice switch is
// nm_ + nn_ + nm_ * nn_. However, first P - 1 slices will receive only
// nm_ + nn_ notifications, because they will not receive notifications
- // from preceeding kernels.
+ // from preceding kernels.
state_switch_[x] =
x == 0
? 1
@@ -530,7 +530,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
void kernel(Index m, Index n, Index k) {
// Note: order of iteration matters here. Iteration over m is innermost
- // because we want to reuse the same packed rhs in consequetive tasks
+ // because we want to reuse the same packed rhs in consecutive tasks
// (rhs fits into L2$ while lhs only into L3$).
const Index nend = n * gn_ + gn(n);
const Index mend = m * gm_ + gm(m);
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h b/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
index b148dae39..bb63baee2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
@@ -195,7 +195,7 @@ class TensorCostModel {
// 11 is L2 cache latency on Haswell.
// We don't know whether data is in L1, L2 or L3. But we are most interested
// in single-threaded computational time around 100us-10ms (smaller time
- // is too small for parallelization, larger time is not intersting
+ // is too small for parallelization, larger time is not interesting
// either because we are probably using all available threads already).
// And for the target time range, L2 seems to be what matters. Data set
// fitting into L1 is too small to take noticeable time. Data set fitting
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
index 6158acbd9..e7beb2c82 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
@@ -286,7 +286,7 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
tileSize =static_cast<Index>(m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>());
auto s= m_queue.get_device().template get_info<cl::sycl::info::device::vendor>();
std::transform(s.begin(), s.end(), s.begin(), ::tolower);
- if(m_queue.get_device().is_cpu()){ // intel doesnot allow to use max workgroup size
+ if(m_queue.get_device().is_cpu()){ // intel doesn't allow to use max workgroup size
tileSize=std::min(static_cast<Index>(256), static_cast<Index>(tileSize));
}
rng = n;
@@ -303,7 +303,7 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
template<typename Index>
EIGEN_STRONG_INLINE void parallel_for_setup(Index dim0, Index dim1, Index &tileSize0, Index &tileSize1, Index &rng0, Index &rng1, Index &GRange0, Index &GRange1) const {
Index max_workgroup_Size = static_cast<Index>(maxSyclThreadsPerBlock());
- if(m_queue.get_device().is_cpu()){ // intel doesnot allow to use max workgroup size
+ if(m_queue.get_device().is_cpu()){ // intel doesn't allow to use max workgroup size
max_workgroup_Size=std::min(static_cast<Index>(256), static_cast<Index>(max_workgroup_Size));
}
Index pow_of_2 = static_cast<Index>(std::log2(max_workgroup_Size));
@@ -331,7 +331,7 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
template<typename Index>
EIGEN_STRONG_INLINE void parallel_for_setup(Index dim0, Index dim1,Index dim2, Index &tileSize0, Index &tileSize1, Index &tileSize2, Index &rng0, Index &rng1, Index &rng2, Index &GRange0, Index &GRange1, Index &GRange2) const {
Index max_workgroup_Size = static_cast<Index>(maxSyclThreadsPerBlock());
- if(m_queue.get_device().is_cpu()){ // intel doesnot allow to use max workgroup size
+ if(m_queue.get_device().is_cpu()){ // intel doesn't allow to use max workgroup size
max_workgroup_Size=std::min(static_cast<Index>(256), static_cast<Index>(max_workgroup_Size));
}
Index pow_of_2 = static_cast<Index>(std::log2(max_workgroup_Size));
@@ -377,7 +377,7 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
EIGEN_STRONG_INLINE int majorDeviceVersion() const { return 1; }
EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerMultiProcessor() const {
- // OpenCL doesnot have such concept
+ // OpenCL doesn't have such concept
return 2;
}
@@ -519,7 +519,7 @@ struct SyclDevice {
return m_queue_stream->maxSyclThreadsPerBlock();
}
EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerMultiProcessor() const {
- // OpenCL doesnot have such concept
+ // OpenCL doesn't have such concept
return m_queue_stream->maxSyclThreadsPerMultiProcessor();
// return stream_->deviceProperties().maxThreadsPerMultiProcessor;
}
@@ -544,7 +544,7 @@ struct SyclDevice {
};
// This is used as a distingushable device inside the kernel as the sycl device class is not Standard layout.
// This is internal and must not be used by user. This dummy device allow us to specialise the tensor evaluator
-// inside the kenrel. So we can have two types of eval for host and device. This is required for TensorArgMax operation
+// inside the kernel. So we can have two types of eval for host and device. This is required for TensorArgMax operation
struct SyclKernelDevice:DefaultDevice{};
} // end namespace Eigen
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h
index f81da318c..d6ab4d997 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h
@@ -274,7 +274,7 @@ struct TensorEvaluator<const TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir>, D
}
}
- // processs the line
+ // process the line
if (is_power_of_two) {
processDataLineCooleyTukey(line_buf, line_len, log_len);
}
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
index 354bbe8d1..6c237bac3 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
@@ -12,7 +12,7 @@
namespace Eigen {
-// MakePointer class is used as a container of the adress space of the pointer
+// MakePointer class is used as a container of the address space of the pointer
// on the host and on the device. From the host side it generates the T* pointer
// and when EIGEN_USE_SYCL is used it construct a buffer with a map_allocator to
// T* m_data on the host. It is always called on the device.
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h b/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
index 91d4ead28..f0f7c7826 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
@@ -272,8 +272,8 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
break;
default:
eigen_assert(false && "unexpected padding");
- m_outputCols=0; // silence the uninitialised warnig;
- m_outputRows=0; //// silence the uninitialised warnig;
+ m_outputCols=0; // silence the uninitialised warning;
+ m_outputRows=0; //// silence the uninitialised warning;
}
}
eigen_assert(m_outputRows > 0);
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h b/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h
index bf5c532ff..25ba2001e 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h
@@ -167,7 +167,7 @@ struct TensorIntDivisor {
shift2 = log_div > 1 ? log_div-1 : 0;
}
- // Must have 0 <= numerator. On platforms that dont support the __uint128_t
+ // Must have 0 <= numerator. On platforms that don't support the __uint128_t
// type numerator should also be less than 2^32-1.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(const T numerator) const {
eigen_assert(static_cast<typename UnsignedTraits<T>::type>(numerator) < NumTraits<UnsignedType>::highest()/2);
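As an aside, the idea behind TensorIntDivisor is the classic replacement of integer division by a precomputed multiply-and-shift. A minimal sketch of that technique follows, assuming a compiler that provides __uint128_t (the names FastDivisor/divide are illustrative, not Eigen's API):

    #include <cassert>
    #include <cstdint>

    struct FastDivisor {
      uint64_t multiplier;
      int shift;

      explicit FastDivisor(uint32_t d) {
        assert(d > 0);
        shift = 0;  // shift = ceil(log2(d))
        while ((uint64_t(1) << shift) < d) ++shift;
        // multiplier = ceil(2^(32+shift) / d); may need 33 bits, hence uint64_t.
        multiplier = (uint64_t)((((__uint128_t)1 << (32 + shift)) + d - 1) / d);
      }

      // Requires numerator >= 0, mirroring the precondition documented above.
      uint32_t divide(uint32_t numerator) const {
        return (uint32_t)(((__uint128_t)numerator * multiplier) >> (32 + shift));
      }
    };

With this setup, FastDivisor(3).divide(10) yields 3; repeated divisions by the same divisor cost one multiply and one shift instead of a hardware divide.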
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
index 94899252b..a379f5a94 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
@@ -106,7 +106,7 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
/// if the shared memory is less than the GRange, we set shared_mem size to the TotalSize and in this case one kernel would be created for recursion to reduce all to one.
if (GRange < outTileSize) outTileSize=GRange;
/// creating the shared memory for calculating reduction.
- /// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can
+ /// This one is used to collect all the reduced value of shared memory as we don't have global barrier on GPU. Once it is saved we can
/// recursively apply reduction on it in order to reduce the whole.
auto temp_global_buffer =cl::sycl::buffer<CoeffReturnType, 1>(cl::sycl::range<1>(GRange));
typedef typename Eigen::internal::remove_all<decltype(self.xprDims())>::type Dims;
@@ -150,7 +150,7 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
// getting final out buffer at the moment the created buffer is true because there is no need for assign
/// creating the shared memory for calculating reduction.
- /// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can
+ /// This one is used to collect all the reduced value of shared memory as we don't have global barrier on GPU. Once it is saved we can
/// recursively apply reduction on it in order to reduce the whole.
dev.parallel_for_setup(num_coeffs_to_preserve, tileSize, range, GRange);
dev.sycl_queue().submit([&](cl::sycl::handler &cgh) {
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h b/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h
index 99245f778..b2b4fd8d3 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h
@@ -31,7 +31,7 @@ class TensorLazyBaseEvaluator {
int refCount() const { return m_refcount; }
private:
- // No copy, no assigment;
+ // No copy, no assignment;
TensorLazyBaseEvaluator(const TensorLazyBaseEvaluator& other);
TensorLazyBaseEvaluator& operator = (const TensorLazyBaseEvaluator& other);
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
index a7905706d..a248e303b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
@@ -117,7 +117,7 @@ SYCLEXTRFUNCTERNARY()
-//TensorCustomOp must be specialised otherewise it will be captured by UnaryCategory while its action is different
+//TensorCustomOp must be specialised otherwise it will be captured by UnaryCategory while its action is different
//from the UnaryCategory and it is similar to the general FunctorExtractor.
/// specialisation of TensorCustomOp
#define SYCLEXTRFUNCCUSTOMUNARYOP(CVQual)\
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclFunctors.h
index e5b892f2e..a447c3f88 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclFunctors.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclFunctors.h
@@ -80,7 +80,7 @@ template < typename HostExpr, typename FunctorExpr, typename Tuple_of_Acc, typen
typedef typename ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
auto device_expr = createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
/// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviour
- /// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the
+ /// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leafnode to pass the
/// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here.
const auto device_self_expr= Eigen::TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, functor);
/// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is
@@ -121,7 +121,7 @@ class ReductionFunctor<HostExpr, FunctorExpr, Tuple_of_Acc, Dims, Eigen::interna
typedef typename ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
auto device_expr = createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
/// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviour
- /// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the
+ /// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leafnode to pass the
/// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here.
const auto device_self_expr= Eigen::TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, functor);
/// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is
@@ -168,7 +168,7 @@ public:
typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
auto device_expr = TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
/// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviour
- /// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the
+ /// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leafnode to pass the
/// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here.
const auto device_self_expr= Eigen::TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, op);
/// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is
@@ -215,7 +215,7 @@ public:
typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
auto device_expr = TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
/// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviour
- /// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the
+ /// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leafnode to pass the
/// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here.
const auto device_self_expr= Eigen::TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, op);
/// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h
index 58ab0f0d5..9e6c3e4fa 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h
@@ -143,7 +143,7 @@ struct IndexList {};
/// \brief Collects internal details for generating index ranges [MIN, MAX)
/// Declare primary template for index range builder
/// \tparam MIN is the starting index in the tuple
-/// \tparam N represents sizeof..(elemens)- sizeof...(Is)
+/// \tparam N represents sizeof..(elements)- sizeof...(Is)
/// \tparam Is... are the list of generated index so far
template <size_t MIN, size_t N, size_t... Is>
struct RangeBuilder;
@@ -161,7 +161,7 @@ struct RangeBuilder<MIN, MIN, Is...> {
/// in this case we are recursively subtracting N by one and adding one
/// index to Is... list until MIN==N
/// \tparam MIN is the starting index in the tuple
-/// \tparam N represents sizeof..(elemens)- sizeof...(Is)
+/// \tparam N represents sizeof..(elements)- sizeof...(Is)
/// \tparam Is... are the list of generated index so far
template <size_t MIN, size_t N, size_t... Is>
struct RangeBuilder : public RangeBuilder<MIN, N - 1, N - 1, Is...> {};
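The recursion documented above is easy to see in a condensed, self-contained form. The sketch below (C++17, for the fold expression in print) mirrors the RangeBuilder shown in this hunk rather than reproducing Eigen's exact code:

    #include <cstddef>
    #include <iostream>

    template <size_t... Is> struct IndexList {};

    // Recursive case: decrement N and prepend it to Is... until N == MIN.
    template <size_t MIN, size_t N, size_t... Is>
    struct RangeBuilder : RangeBuilder<MIN, N - 1, N - 1, Is...> {};

    // Base case: the collected Is... form the index range [MIN, MAX).
    template <size_t MIN, size_t... Is>
    struct RangeBuilder<MIN, MIN, Is...> { using type = IndexList<Is...>; };

    template <size_t... Is>
    void print(IndexList<Is...>) { ((std::cout << Is << ' '), ...); }

    int main() { print(typename RangeBuilder<2, 6>::type{}); }  // prints: 2 3 4 5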
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h b/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h
index 51c099591..ef199bfb6 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h
@@ -568,7 +568,7 @@ struct TensorEvaluator<const TensorVolumePatchOp<Planes, Rows, Cols, ArgType>, D
Dimensions m_dimensions;
- // Parameters passed to the costructor.
+ // Parameters passed to the constructor.
Index m_plane_strides;
Index m_row_strides;
Index m_col_strides;
diff --git a/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h b/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h
index 0fe0b7c46..04d6d6b23 100644
--- a/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h
+++ b/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h
@@ -241,7 +241,7 @@ struct dimino_first_step_elements
* multiplying all elements in the given subgroup with the new
* coset representative. Note that the first element of the
* subgroup is always the identity element, so the first element of
- * ther result of this template is going to be the coset
+ * the result of this template is going to be the coset
* representative itself.
*
* Note that this template accepts an additional boolean parameter
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h b/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
index 71d55552d..0a7181102 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
@@ -33,10 +33,10 @@ namespace Eigen {
// ec.Notify(true);
//
// Notify is cheap if there are no waiting threads. Prewait/CommitWait are not
-// cheap, but they are executed only if the preceeding predicate check has
+// cheap, but they are executed only if the preceding predicate check has
// failed.
//
-// Algorihtm outline:
+// Algorithm outline:
// There are two main variables: predicate (managed by user) and state_.
// Operation closely resembles Dekker mutual algorithm:
// https://en.wikipedia.org/wiki/Dekker%27s_algorithm
@@ -79,7 +79,7 @@ class EventCount {
uint64_t state = state_.load(std::memory_order_seq_cst);
for (;;) {
if (int64_t((state & kEpochMask) - epoch) < 0) {
- // The preceeding waiter has not decided on its fate. Wait until it
+ // The preceding waiter has not decided on its fate. Wait until it
// calls either CancelWait or CommitWait, or is notified.
EIGEN_THREAD_YIELD();
state = state_.load(std::memory_order_seq_cst);
@@ -110,7 +110,7 @@ class EventCount {
uint64_t state = state_.load(std::memory_order_relaxed);
for (;;) {
if (int64_t((state & kEpochMask) - epoch) < 0) {
- // The preceeding waiter has not decided on its fate. Wait until it
+ // The preceding waiter has not decided on its fate. Wait until it
// calls either CancelWait or CommitWait, or is notified.
EIGEN_THREAD_YIELD();
state = state_.load(std::memory_order_relaxed);
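The protocol being documented here — check the predicate first, and only go through Prewait/CommitWait when that check fails — is what keeps Notify cheap. A rough condition-variable analogue of the same shape (a sketch only; Eigen's EventCount is lock-free and looks nothing like this internally) is:

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <queue>
    #include <thread>

    std::mutex m;
    std::condition_variable cv;
    std::queue<int> work;  // the "predicate" is !work.empty()

    void producer() {
      { std::lock_guard<std::mutex> lk(m); work.push(42); }
      cv.notify_one();  // cheap when no consumer is blocked
    }

    void consumer() {
      std::unique_lock<std::mutex> lk(m);
      // Re-check the predicate before blocking, like Prewait + CommitWait.
      cv.wait(lk, [] { return !work.empty(); });
      std::cout << "got " << work.front() << "\n";
    }

    int main() {
      std::thread c(consumer), p(producer);
      p.join();
      c.join();
    }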
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h b/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h
index 49d0cdc36..cb3690a2e 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h
@@ -198,7 +198,7 @@ class RunQueue {
};
std::mutex mutex_;
// Low log(kSize) + 1 bits in front_ and back_ contain rolling index of
- // front/back, repsectively. The remaining bits contain modification counters
+ // front/back, respectively. The remaining bits contain modification counters
// that are incremented on Push operations. This allows us to (1) distinguish
// between empty and full conditions (if we would use log(kSize) bits for
// position, these conditions would be indistinguishable); (2) obtain
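To make the layout concrete, here is a small standalone sketch of the packing this comment describes, for a hypothetical kSize of 64 (so log2(kSize) + 1 = 7 position bits); the helper names are illustrative, not Eigen's:

    #include <cstdint>
    #include <iostream>

    constexpr unsigned kPosBits = 7;  // log2(kSize) + 1 for kSize == 64
    constexpr uint64_t kPosMask = (uint64_t(1) << kPosBits) - 1;

    uint64_t position(uint64_t v) { return v & kPosMask; }
    uint64_t counter(uint64_t v) { return v >> kPosBits; }

    // A Push advances the rolling position and bumps the modification counter;
    // the extra position bit lets equal positions distinguish empty from full.
    uint64_t push(uint64_t back) {
      return ((counter(back) + 1) << kPosBits) | ((position(back) + 1) & kPosMask);
    }

    int main() {
      uint64_t back = push(0);
      std::cout << "pos=" << position(back) << " count=" << counter(back) << "\n";
    }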
diff --git a/unsupported/Eigen/CXX11/src/util/EmulateArray.h b/unsupported/Eigen/CXX11/src/util/EmulateArray.h
index 96b3a8261..ddd54f4b3 100644
--- a/unsupported/Eigen/CXX11/src/util/EmulateArray.h
+++ b/unsupported/Eigen/CXX11/src/util/EmulateArray.h
@@ -219,7 +219,7 @@ template<class T, std::size_t N> struct array_size<const array<T,N>& > {
#else
-// The compiler supports c++11, and we're not targetting cuda: use std::array as Eigen::array
+// The compiler supports c++11, and we're not targeting cuda: use std::array as Eigen::array
#include <array>
namespace Eigen {
diff --git a/unsupported/Eigen/NonLinearOptimization b/unsupported/Eigen/NonLinearOptimization
index 600ab4c12..c22b89054 100644
--- a/unsupported/Eigen/NonLinearOptimization
+++ b/unsupported/Eigen/NonLinearOptimization
@@ -35,7 +35,7 @@
* a zero for the system (Powell hybrid "dogleg" method).
*
* This code is a port of minpack (http://en.wikipedia.org/wiki/MINPACK).
- * Minpack is a very famous, old, robust and well-reknown package, written in
+ * Minpack is a very famous, old, robust and well renowned package, written in
* fortran. Those implementations have been carefully tuned, tested, and used
* for several decades.
*
@@ -63,7 +63,7 @@
* Other tests were added by myself at the very beginning of the
* process and check the results for levenberg-marquardt using the reference data
* on http://www.itl.nist.gov/div898/strd/nls/nls_main.shtml. Since then i've
- * carefully checked that the same results were obtained when modifiying the
+ * carefully checked that the same results were obtained when modifying the
* code. Please note that we do not always get the exact same decimals as they do,
* but this is ok : they use 128bits float, and we do the tests using the C type 'double',
* which is 64 bits on most platforms (x86 and amd64, at least).
diff --git a/unsupported/Eigen/OpenGLSupport b/unsupported/Eigen/OpenGLSupport
index 87f50947d..11d99567e 100644
--- a/unsupported/Eigen/OpenGLSupport
+++ b/unsupported/Eigen/OpenGLSupport
@@ -25,7 +25,7 @@ namespace Eigen {
*
* This module provides wrapper functions for a couple of OpenGL functions
* which simplify the way to pass Eigen's object to openGL.
- * Here is an exmaple:
+ * Here is an example:
*
* \code
* // You need to add path_to_eigen/unsupported to your include path.
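The \code block is truncated by the diff context; to give a flavor of what the module documents, here is a hedged sketch of the kind of wrapper usage it provides (glVertex and glMultMatrix overloads for Eigen types are part of this module; the GL headers, context setup, and the draw function itself are assumptions for illustration):

    #include <GL/gl.h>
    #include <Eigen/OpenGLSupport>

    // Draw a single point at p, positioned by pose (GL context setup assumed).
    void draw(const Eigen::Vector3f& p, const Eigen::Affine3f& pose) {
      glLoadIdentity();
      glMultMatrix(pose.matrix());  // Eigen-aware overload from this module
      glBegin(GL_POINTS);
      glVertex(p);                  // Eigen-aware overload from this module
      glEnd();
    }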
diff --git a/unsupported/Eigen/src/BVH/KdBVH.h b/unsupported/Eigen/src/BVH/KdBVH.h
index 1b8d75865..13f792cd0 100644
--- a/unsupported/Eigen/src/BVH/KdBVH.h
+++ b/unsupported/Eigen/src/BVH/KdBVH.h
@@ -170,7 +170,7 @@ private:
typedef internal::vector_int_pair<Scalar, Dim> VIPair;
typedef std::vector<VIPair, aligned_allocator<VIPair> > VIPairList;
typedef Matrix<Scalar, Dim, 1> VectorType;
- struct VectorComparator //compares vectors, or, more specificall, VIPairs along a particular dimension
+ struct VectorComparator //compares vectors, or more specifically, VIPairs along a particular dimension
{
VectorComparator(int inDim) : dim(inDim) {}
inline bool operator()(const VIPair &v1, const VIPair &v2) const { return v1.first[dim] < v2.first[dim]; }
diff --git a/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h b/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
index 866a8a460..9f7bff764 100644
--- a/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
+++ b/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
@@ -300,7 +300,7 @@ public:
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
diff --git a/unsupported/Eigen/src/EulerAngles/EulerSystem.h b/unsupported/Eigen/src/EulerAngles/EulerSystem.h
index 28f52da61..65c2e94c7 100644
--- a/unsupported/Eigen/src/EulerAngles/EulerSystem.h
+++ b/unsupported/Eigen/src/EulerAngles/EulerSystem.h
@@ -12,7 +12,7 @@
namespace Eigen
{
- // Forward declerations
+ // Forward declarations
template <typename _Scalar, class _System>
class EulerAngles;
diff --git a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
index dc0093eb9..37d5b4c6c 100644
--- a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
+++ b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
@@ -99,7 +99,7 @@ void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV)
/** \ingroup IterativeSolvers_Module
* Constrained conjugate gradient
*
- * Computes the minimum of \f$ 1/2((Ax).x) - bx \f$ under the contraint \f$ Cx \le f \f$
+ * Computes the minimum of \f$ 1/2((Ax).x) - bx \f$ under the constraint \f$ Cx \le f \f$
*/
template<typename TMatrix, typename CMatrix,
typename VectorX, typename VectorB, typename VectorF>
diff --git a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
index d603ba336..5cfe49302 100644
--- a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
@@ -214,7 +214,7 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
void dgmresInitDeflation(Index& rows) const;
mutable DenseMatrix m_V; // Krylov basis vectors
mutable DenseMatrix m_H; // Hessenberg matrix
- mutable DenseMatrix m_Hes; // Initial hessenberg matrix wihout Givens rotations applied
+ mutable DenseMatrix m_Hes; // Initial hessenberg matrix without Givens rotations applied
mutable Index m_restart; // Maximum size of the Krylov subspace
mutable DenseMatrix m_U; // Vectors that form the basis of the invariant subspace
mutable DenseMatrix m_MU; // matrix operator applied to m_U (for next cycles)
@@ -250,7 +250,7 @@ void DGMRES<_MatrixType, _Preconditioner>::dgmres(const MatrixType& mat,const Rh
m_H.resize(m_restart+1, m_restart);
m_Hes.resize(m_restart, m_restart);
m_V.resize(n,m_restart+1);
- //Initial residual vector and intial norm
+ //Initial residual vector and initial norm
x = precond.solve(x);
r0 = rhs - mat * x;
RealScalar beta = r0.norm();
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
index ae9d793b1..123485817 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
@@ -73,7 +73,7 @@ void lmqrsolv(
qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj;
wa[k] = temp;
- /* accumulate the tranformation in the row of s. */
+ /* accumulate the transformation in the row of s. */
for (i = k+1; i<n; ++i) {
temp = givens.c() * s(i,k) + givens.s() * sdiag[i];
sdiag[i] = -givens.s() * s(i,k) + givens.c() * sdiag[i];
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
index 995427978..5f64501be 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
@@ -233,9 +233,9 @@ class LevenbergMarquardt : internal::no_assignment_operator
/**
* \brief Reports whether the minimization was successful
- * \returns \c Success if the minimization was succesful,
+ * \returns \c Success if the minimization was successful,
* \c NumericalIssue if a numerical problem arises during the
- * minimization process, for exemple during the QR factorization
+ * minimization process, for example during the QR factorization
* \c NoConvergence if the minimization did not converge after
* the maximum number of function evaluation allowed
* \c InvalidInput if the input matrix is invalid
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
index 85ab3d97c..03356998b 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
@@ -313,7 +313,7 @@ struct matrix_exp_computeUV<MatrixType, long double>
matrix_exp_pade17(A, U, V);
}
-#elif LDBL_MANT_DIG <= 112 // quadruple precison
+#elif LDBL_MANT_DIG <= 112 // quadruple precision
if (l1norm < 1.639394610288918690547467954466970e-005L) {
matrix_exp_pade3(arg, U, V);
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
index ebc433d89..33609aea9 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
@@ -81,7 +81,7 @@ class MatrixPowerParenthesesReturnValue : public ReturnByValue< MatrixPowerParen
*
* \note Currently this class is only used by MatrixPower. One may
* insist that this be nested into MatrixPower. This class is here to
- * faciliate future development of triangular matrix functions.
+ * facilitate future development of triangular matrix functions.
*/
template<typename MatrixType>
class MatrixPowerAtomic : internal::noncopyable
diff --git a/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
index feafd62a8..4f2f560b3 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
@@ -61,7 +61,7 @@ void qrsolv(
qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj;
wa[k] = temp;
- /* accumulate the tranformation in the row of s. */
+ /* accumulate the transformation in the row of s. */
for (i = k+1; i<n; ++i) {
temp = givens.c() * s(i,k) + givens.s() * sdiag[i];
sdiag[i] = -givens.s() * s(i,k) + givens.c() * sdiag[i];
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
index f28766061..09fc65255 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
@@ -22,7 +22,7 @@ void r1updt(
Scalar temp;
JacobiRotation<Scalar> givens;
- // r1updt had a broader usecase, but we dont use it here. And, more
+ // r1updt had a broader usecase, but we don't use it here. And, more
// importantly, we can not test it.
eigen_assert(m==n);
eigen_assert(u.size()==m);
diff --git a/unsupported/Eigen/src/Polynomials/Companion.h b/unsupported/Eigen/src/Polynomials/Companion.h
index e0af6ebe4..41a4efc2f 100644
--- a/unsupported/Eigen/src/Polynomials/Companion.h
+++ b/unsupported/Eigen/src/Polynomials/Companion.h
@@ -104,7 +104,7 @@ class companion
/** Helper function for the balancing algorithm.
* \returns true if the row and the column, having colNorm and rowNorm
* as norms, are balanced, false otherwise.
- * colB and rowB are repectively the multipliers for
+ * colB and rowB are respectively the multipliers for
* the column and the row in order to balance them.
* */
bool balanced( RealScalar colNorm, RealScalar rowNorm,
@@ -113,7 +113,7 @@ class companion
/** Helper function for the balancing algorithm.
* \returns true if the row and the column, having colNorm and rowNorm
* as norms, are balanced, false otherwise.
- * colB and rowB are repectively the multipliers for
+ * colB and rowB are respectively the multipliers for
* the column and the row in order to balance them.
* */
bool balancedR( RealScalar colNorm, RealScalar rowNorm,
diff --git a/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h b/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
index a1f54ed35..bda057a85 100644
--- a/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
+++ b/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
@@ -41,7 +41,7 @@ public:
/** Sets the relative threshold value used to prune zero coefficients during the decomposition.
*
- * Setting a value greater than zero speeds up computation, and yields to an imcomplete
+ * Setting a value greater than zero speeds up computation, and yields to an incomplete
* factorization with fewer non zero coefficients. Such approximate factors are especially
* useful to initialize an iterative solver.
*
diff --git a/unsupported/Eigen/src/Skyline/SkylineMatrix.h b/unsupported/Eigen/src/Skyline/SkylineMatrix.h
index a2a8933ca..f77d79a04 100644
--- a/unsupported/Eigen/src/Skyline/SkylineMatrix.h
+++ b/unsupported/Eigen/src/Skyline/SkylineMatrix.h
@@ -206,26 +206,26 @@ public:
if (col > row) //upper matrix
{
const Index minOuterIndex = inner - m_data.upperProfile(inner);
- eigen_assert(outer >= minOuterIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(outer >= minOuterIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
}
if (col < row) //lower matrix
{
const Index minInnerIndex = outer - m_data.lowerProfile(outer);
- eigen_assert(inner >= minInnerIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(inner >= minInnerIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
}
} else {
if (outer > inner) //upper matrix
{
const Index maxOuterIndex = inner + m_data.upperProfile(inner);
- eigen_assert(outer <= maxOuterIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(outer <= maxOuterIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
}
if (outer < inner) //lower matrix
{
const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
- eigen_assert(inner <= maxInnerIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(inner <= maxInnerIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
}
}
@@ -300,11 +300,11 @@ public:
if (IsRowMajor) {
const Index minInnerIndex = outer - m_data.lowerProfile(outer);
- eigen_assert(inner >= minInnerIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(inner >= minInnerIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
} else {
const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
- eigen_assert(inner <= maxInnerIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(inner <= maxInnerIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
}
}
@@ -336,11 +336,11 @@ public:
if (IsRowMajor) {
const Index minOuterIndex = inner - m_data.upperProfile(inner);
- eigen_assert(outer >= minOuterIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(outer >= minOuterIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
} else {
const Index maxOuterIndex = inner + m_data.upperProfile(inner);
- eigen_assert(outer <= maxOuterIndex && "you try to acces a coeff that do not exist in the storage");
+ eigen_assert(outer <= maxOuterIndex && "You tried to access a coeff that does not exist in the storage");
return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
}
}
diff --git a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
index 037a13f86..3f1ff14ad 100644
--- a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
+++ b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
@@ -187,7 +187,7 @@ template<typename _Scalar, int _Options, typename _StorageIndex>
/** Does nothing: provided for compatibility with SparseMatrix */
inline void finalize() {}
- /** Suppress all nonzeros which are smaller than \a reference under the tolerence \a epsilon */
+ /** Suppress all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
{
for (Index j=0; j<outerSize(); ++j)
@@ -224,21 +224,21 @@ template<typename _Scalar, int _Options, typename _StorageIndex>
}
}
- /** The class DynamicSparseMatrix is deprectaed */
+ /** The class DynamicSparseMatrix is deprecated */
EIGEN_DEPRECATED inline DynamicSparseMatrix()
: m_innerSize(0), m_data(0)
{
eigen_assert(innerSize()==0 && outerSize()==0);
}
- /** The class DynamicSparseMatrix is deprectaed */
+ /** The class DynamicSparseMatrix is deprecated */
EIGEN_DEPRECATED inline DynamicSparseMatrix(Index rows, Index cols)
: m_innerSize(0)
{
resize(rows, cols);
}
- /** The class DynamicSparseMatrix is deprectaed */
+ /** The class DynamicSparseMatrix is deprecated */
template<typename OtherDerived>
EIGEN_DEPRECATED explicit inline DynamicSparseMatrix(const SparseMatrixBase<OtherDerived>& other)
: m_innerSize(0)
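A short hedged usage sketch of the prune() member documented in the hunk above, written against the deprecated DynamicSparseMatrix API (assuming the Eigen root is on the include path; EIGEN_NO_DEPRECATED_WARNING is the same macro the sparse_extra test uses):

    #define EIGEN_NO_DEPRECATED_WARNING
    #include <unsupported/Eigen/SparseExtra>
    #include <iostream>

    int main() {
      Eigen::DynamicSparseMatrix<double> m(3, 3);
      m.coeffRef(0, 0) = 1.0;
      m.coeffRef(1, 1) = 1e-14;           // much smaller than the reference
      m.prune(1.0);                       // epsilon defaults to dummy_precision()
      std::cout << m.nonZeros() << "\n";  // prints 1: the tiny entry was dropped
    }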
diff --git a/unsupported/Eigen/src/SparseExtra/MarketIO.h b/unsupported/Eigen/src/SparseExtra/MarketIO.h
index 6d57ab2e9..1618b09a8 100644
--- a/unsupported/Eigen/src/SparseExtra/MarketIO.h
+++ b/unsupported/Eigen/src/SparseExtra/MarketIO.h
@@ -104,7 +104,7 @@ namespace internal
out << value.real << " " << value.imag()<< "\n";
}
-} // end namepsace internal
+} // end namespace internal
inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscomplex, bool& isvector)
{
diff --git a/unsupported/Eigen/src/Splines/SplineFitting.h b/unsupported/Eigen/src/Splines/SplineFitting.h
index c761a9b3d..1a4b80a2f 100644
--- a/unsupported/Eigen/src/Splines/SplineFitting.h
+++ b/unsupported/Eigen/src/Splines/SplineFitting.h
@@ -181,7 +181,7 @@ namespace Eigen
* \ingroup Splines_Module
*
* \param[in] pts The data points to which a spline should be fit.
- * \param[out] chord_lengths The resulting chord lenggth vector.
+ * \param[out] chord_lengths The resulting chord length vector.
*
* \sa Les Piegl and Wayne Tiller, The NURBS book (2nd ed.), 1997, 9.2.1 Global Curve Interpolation to Point Data
**/
diff --git a/unsupported/README.txt b/unsupported/README.txt
index 83479ff0b..70793bf13 100644
--- a/unsupported/README.txt
+++ b/unsupported/README.txt
@@ -20,7 +20,7 @@ However, it:
- must rely on Eigen,
- must be highly related to math,
- should have some general purpose in the sense that it could
- potentially become an offical Eigen module (or be merged into another one).
+ potentially become an official Eigen module (or be merged into another one).
In doubt feel free to contact us. For instance, if your addons is very too specific
but it shows an interesting way of using Eigen, then it could be a nice demo.
diff --git a/unsupported/bench/bench_svd.cpp b/unsupported/bench/bench_svd.cpp
index 01d8231ae..e7028a2b9 100644
--- a/unsupported/bench/bench_svd.cpp
+++ b/unsupported/bench/bench_svd.cpp
@@ -70,7 +70,7 @@ void bench_svd(const MatrixType& a = MatrixType())
std::cout<< std::endl;
timerJacobi.reset();
timerBDC.reset();
- cout << " Computes rotaion matrix" <<endl;
+ cout << " Computes rotation matrix" <<endl;
for (int k=1; k<=NUMBER_SAMPLE; ++k)
{
timerBDC.start();
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index 42b790d55..e99eab0e3 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -1,5 +1,5 @@
# generate split test header file only if it does not yet exist
-# in order to prevent a rebuild everytime cmake is configured
+# in order to prevent a rebuild every time cmake is configured
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
foreach(i RANGE 1 999)
diff --git a/unsupported/test/autodiff_scalar.cpp b/unsupported/test/autodiff_scalar.cpp
index a917ec344..1d4418203 100644
--- a/unsupported/test/autodiff_scalar.cpp
+++ b/unsupported/test/autodiff_scalar.cpp
@@ -81,7 +81,7 @@ void check_limits_specialization()
typedef std::numeric_limits<AD> A;
typedef std::numeric_limits<Scalar> B;
- // workaround "unsed typedef" warning:
+ // workaround "unused typedef" warning:
VERIFY(!bool(internal::is_same<B, A>::value));
#if EIGEN_HAS_CXX11
diff --git a/unsupported/test/cxx11_tensor_inflation_sycl.cpp b/unsupported/test/cxx11_tensor_inflation_sycl.cpp
index f2f87f7ed..cf3e29f4c 100644
--- a/unsupported/test/cxx11_tensor_inflation_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_inflation_sycl.cpp
@@ -22,10 +22,10 @@
using Eigen::Tensor;
-// Inflation Defenition for each dimention the inflated val would be
+// Inflation Definition for each dimension the inflated val would be
//((dim-1)*strid[dim] +1)
-// for 1 dimnention vector of size 3 with value (4,4,4) with the inflated stride value of 3 would be changed to
+// for 1 dimension vector of size 3 with value (4,4,4) with the inflated stride value of 3 would be changed to
// tensor of size (2*3) +1 = 7 with the value of
// (4, 0, 0, 4, 0, 0, 4).
diff --git a/unsupported/test/cxx11_tensor_of_float16_cuda.cu b/unsupported/test/cxx11_tensor_of_float16_cuda.cu
index 167b75d25..7a751ff02 100644
--- a/unsupported/test/cxx11_tensor_of_float16_cuda.cu
+++ b/unsupported/test/cxx11_tensor_of_float16_cuda.cu
@@ -247,7 +247,7 @@ void test_cuda_trancendental() {
}
for (int i = 0; i < num_elem; ++i) {
std::cout << "Checking elemwise log " << i << " input = " << input2(i) << " full = " << full_prec2(i) << " half = " << half_prec2(i) << std::endl;
- if(std::abs(input2(i)-1.f)<0.05f) // log lacks accurary nearby 1
+ if(std::abs(input2(i)-1.f)<0.05f) // log lacks accuracy nearby 1
VERIFY_IS_APPROX(full_prec2(i)+Eigen::half(0.1f), half_prec2(i)+Eigen::half(0.1f));
else
VERIFY_IS_APPROX(full_prec2(i), half_prec2(i));
diff --git a/unsupported/test/cxx11_tensor_random_cuda.cu b/unsupported/test/cxx11_tensor_random_cuda.cu
index fa1a46732..389c0a8c2 100644
--- a/unsupported/test/cxx11_tensor_random_cuda.cu
+++ b/unsupported/test/cxx11_tensor_random_cuda.cu
@@ -37,7 +37,7 @@ void test_cuda_random_uniform()
assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
- // For now we just check thes code doesn't crash.
+ // For now we just check this code doesn't crash.
// TODO: come up with a valid test of randomness
}
diff --git a/unsupported/test/forward_adolc.cpp b/unsupported/test/forward_adolc.cpp
index 866db8e86..6d0ae738d 100644
--- a/unsupported/test/forward_adolc.cpp
+++ b/unsupported/test/forward_adolc.cpp
@@ -132,7 +132,7 @@ void test_forward_adolc()
}
{
- // simple instanciation tests
+ // simple instantiation tests
Matrix<adtl::adouble,2,1> x;
foo(x);
Matrix<adtl::adouble,Dynamic,Dynamic> A(4,4);;
diff --git a/unsupported/test/sparse_extra.cpp b/unsupported/test/sparse_extra.cpp
index 4f6723d6d..7cf4a77c3 100644
--- a/unsupported/test/sparse_extra.cpp
+++ b/unsupported/test/sparse_extra.cpp
@@ -8,7 +8,7 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-// import basic and product tests for deprectaed DynamicSparseMatrix
+// import basic and product tests for deprecated DynamicSparseMatrix
#define EIGEN_NO_DEPRECATED_WARNING
#include "sparse_basic.cpp"
#include "sparse_product.cpp"