author	luz.paz <luzpaz@users.noreply.github.com>	2018-09-18 04:15:01 -0400
committer	luz.paz <luzpaz@users.noreply.github.com>	2018-09-18 04:15:01 -0400
commit	f67b19a884768df88107d44e28542ae5dde677d2 (patch)
tree	d605213b184ce0c656be2bc2b270d63fed375e26 /unsupported
parent	77b447c24e3344e43ff64eb932d4bb35a2db01ce (diff)
[PATCH 1/2] Misc. typos
From 68d431b4c14ad60a778ee93c1f59ecc4b931950e Mon Sep 17 00:00:00 2001

Found via `codespell -q 3 -I ../eigen-word-whitelist.txt` where the whitelist consists of:
```
als
ans
cas
dum
lastr
lowd
nd
overfl
pres
preverse
substraction
te
uint
whch
```
---
 CMakeLists.txt                                  | 26 +++++++++----------
 Eigen/src/Core/GenericPacketMath.h              |  2 +-
 Eigen/src/SparseLU/SparseLU.h                   |  2 +-
 bench/bench_norm.cpp                            |  2 +-
 doc/HiPerformance.dox                           |  2 +-
 doc/QuickStartGuide.dox                         |  2 +-
 .../Eigen/CXX11/src/Tensor/TensorChipping.h     |  6 ++---
 .../Eigen/CXX11/src/Tensor/TensorDeviceGpu.h    |  2 +-
 .../src/Tensor/TensorForwardDeclarations.h      |  4 +--
 .../src/Tensor/TensorGpuHipCudaDefines.h        |  2 +-
 .../Eigen/CXX11/src/Tensor/TensorReduction.h    |  2 +-
 .../CXX11/src/Tensor/TensorReductionGpu.h       |  2 +-
 .../test/cxx11_tensor_concatenation.cpp         |  2 +-
 unsupported/test/cxx11_tensor_executor.cpp      |  2 +-
 14 files changed, 29 insertions(+), 29 deletions(-)
Diffstat (limited to 'unsupported')
-rw-r--r--	unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h	6
-rw-r--r--	unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h	2
-rw-r--r--	unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h	4
-rw-r--r--	unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h	2
-rw-r--r--	unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h	2
-rw-r--r--	unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h	2
-rw-r--r--	unsupported/test/cxx11_tensor_concatenation.cpp	2
-rw-r--r--	unsupported/test/cxx11_tensor_executor.cpp	2
8 files changed, 11 insertions, 11 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h b/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
index b47fa9e8e..8c0644925 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
@@ -244,7 +244,7 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
return rslt;
} else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims - 1) ||
(static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
- // m_stride is aways greater than index, so let's avoid the integer division.
+ // m_stride is always greater than index, so let's avoid the integer division.
eigen_assert(m_stride > index);
return m_impl.template packet<LoadMode>(index + m_inputOffset);
} else {
@@ -377,7 +377,7 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
inputIndex = index * m_inputStride + m_inputOffset;
} else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims - 1) ||
(static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
- // m_stride is aways greater than index, so let's avoid the integer
+ // m_stride is always greater than index, so let's avoid the integer
// division.
eigen_assert(m_stride > index);
inputIndex = index + m_inputOffset;
@@ -462,7 +462,7 @@ struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
}
} else if ((static_cast<int>(this->Layout) == static_cast<int>(ColMajor) && this->m_dim.actualDim() == NumInputDims-1) ||
(static_cast<int>(this->Layout) == static_cast<int>(RowMajor) && this->m_dim.actualDim() == 0)) {
- // m_stride is aways greater than index, so let's avoid the integer division.
+ // m_stride is always greater than index, so let's avoid the integer division.
eigen_assert(this->m_stride > index);
this->m_impl.template writePacket<StoreMode>(index + this->m_inputOffset, x);
} else {
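For context on the comment being corrected in this file: when the chipped dimension is the outermost one, `m_stride` covers the whole chip, so every valid output `index` satisfies `index < m_stride` and the general divide/modulo index mapping collapses to a plain offset. A minimal standalone sketch of that identity, with hypothetical helper names rather than Eigen's actual evaluator code:

```cpp
#include <cassert>
#include <cstddef>

using Index = std::ptrdiff_t;

// General mapping from an output index to an input index (assumed form,
// mirroring the divide/modulo pattern used in the evaluator's slow path).
inline Index map_general(Index index, Index stride, Index input_stride, Index input_offset) {
  const Index outer = index / stride;   // integer division
  const Index inner = index % stride;   // remainder within the chip
  return outer * input_stride + inner + input_offset;
}

// Fast path: with stride > index, outer == 0 and inner == index, so the
// division and modulo can be skipped entirely.
inline Index map_outermost_chip(Index index, Index stride, Index input_offset) {
  assert(stride > index);               // mirrors eigen_assert(m_stride > index)
  return index + input_offset;
}

int main() {
  const Index stride = 12, input_stride = 48, offset = 5;
  for (Index i = 0; i < stride; ++i)
    assert(map_general(i, stride, input_stride, offset) ==
           map_outermost_chip(i, stride, offset));
  return 0;
}
```

Skipping the division matters because integer division is comparatively expensive inside a per-coefficient hot loop.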
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
index b490433db..83cde6afb 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
@@ -12,7 +12,7 @@
// This header file container defines fo gpu* macros which will resolve to
// their equivalent hip* or cuda* versions depending on the compiler in use
-// A separte header (included at the end of this file) will undefine all
+// A separate header (included at the end of this file) will undefine all
#include "TensorGpuHipCudaDefines.h"
namespace Eigen {
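The comment fixed above refers to the gpu* indirection layer defined in TensorGpuHipCudaDefines.h. A rough illustration of that aliasing scheme, with only a few representative names shown and the exact list treated as an assumption:

```cpp
#if defined(EIGEN_USE_HIP)
  #define gpuMalloc       hipMalloc
  #define gpuFree         hipFree
  #define gpuMemcpyAsync  hipMemcpyAsync
  #define gpuStream_t     hipStream_t
  #define gpuError_t      hipError_t
#else  // CUDA build
  #define gpuMalloc       cudaMalloc
  #define gpuFree         cudaFree
  #define gpuMemcpyAsync  cudaMemcpyAsync
  #define gpuStream_t     cudaStream_t
  #define gpuError_t      cudaError_t
#endif
// As the comment notes, a separate header included at the end of the real
// file undefines these aliases again.
```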
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
index 04a8b953d..09b7c994b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
@@ -25,9 +25,9 @@ template<typename T> struct MakePointer {
};
// The PointerType class is a container of the device specefic pointer
-// used for refering to a Pointer on TensorEvaluator class. While the TensorExpression
+// used for referring to a Pointer on TensorEvaluator class. While the TensorExpression
// is a device-agnostic type and need MakePointer class for type conversion,
-// the TensorEvaluator calss can be specialized for a device, hence it is possible
+// the TensorEvaluator class can be specialized for a device, hence it is possible
// to construct different types of temproray storage memory in TensorEvaluator
// for different devices by specializing the following PointerType class.
template<typename T, typename Device> struct PointerType : MakePointer<T>{};
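A compact sketch of the trait pattern this hunk's comment describes: device-agnostic expression code asks `PointerType<T, Device>::Type` for its storage pointer type, and a device can opt into a different handle by specializing the trait. The accelerator device tag and handle below are hypothetical illustrations, not Eigen types:

```cpp
#include <cstddef>

template <typename T>
struct MakePointer {
  typedef T* Type;                      // default: a plain raw pointer
};

struct DefaultDevice {};
struct FancyAcceleratorDevice {};       // hypothetical device tag

// Generic case: fall back to MakePointer, exactly as in the snippet above.
template <typename T, typename Device>
struct PointerType : MakePointer<T> {};

// A device-specific specialization can substitute another storage handle
// without touching the device-agnostic expression types.
template <typename T>
struct PointerType<T, FancyAcceleratorDevice> {
  struct BufferHandle { T* data; std::size_t offset; };
  typedef BufferHandle Type;
};

int main() {
  PointerType<float, DefaultDevice>::Type raw = nullptr;              // float*
  PointerType<float, FancyAcceleratorDevice>::Type handle{nullptr, 0};
  (void)raw; (void)handle;
  return 0;
}
```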
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h
index 5438ebe71..40f58f628 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h
@@ -16,7 +16,7 @@
// for some reason gets sent to the gcc/host compiler instead of the gpu/nvcc/hipcc compiler
// When compiling such files, gcc will end up trying to pick up the CUDA headers by
// default (see the code within "unsupported/Eigen/CXX11/Tensor" that is guarded by EIGEN_USE_GPU)
-// This will obsviously not work when trying to compile tensorflow on a sytem with no CUDA
+// This will obsviously not work when trying to compile tensorflow on a system with no CUDA
// To work around this issue for HIP systems (and leave the default behaviour intact), the
// HIP tensorflow build defines EIGEN_USE_HIP when compiling all source files, and
// "unsupported/Eigen/CXX11/Tensor" has been updated to use HIP header when EIGEN_USE_HIP is
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index 2c69e4fd4..bda114751 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -965,7 +965,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
}
}
- // Intialize output coefficient reducers.
+ // Initialize output coefficient reducers.
for (int i = 0; i < num_reducers; ++i) {
new (&reducers[i]) BlockReducer(m_reducer);
}
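The loop following the corrected comment uses placement new to construct each reducer directly into pre-allocated storage. A self-contained sketch of that pattern, with a stand-in `BlockReducer` rather than Eigen's:

```cpp
#include <new>
#include <type_traits>

struct BlockReducer {
  explicit BlockReducer(int seed) : state(seed) {}
  int state;
};

int main() {
  const int num_reducers = 4;
  // Raw, suitably aligned storage; no BlockReducer is constructed yet.
  std::aligned_storage<sizeof(BlockReducer), alignof(BlockReducer)>::type
      storage[num_reducers];
  BlockReducer* reducers = reinterpret_cast<BlockReducer*>(storage);

  // Construct each reducer in place, mirroring `new (&reducers[i]) BlockReducer(m_reducer)`.
  for (int i = 0; i < num_reducers; ++i) new (&reducers[i]) BlockReducer(42);

  // Objects created with placement new must be destroyed explicitly.
  for (int i = 0; i < num_reducers; ++i) reducers[i].~BlockReducer();
  return 0;
}
```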
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
index 375c570b3..0718ba2a1 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
@@ -771,7 +771,7 @@ struct OuterReducer<Self, Op, GpuDevice> {
// terminate called after throwing an instance of 'std::runtime_error'
// what(): No device code available for function: _ZN5Eigen8internal20OuterReductionKernelIL...
//
- // dont know why this happens (and why is it a runtime error instead of a compile time errror)
+ // don't know why this happens (and why is it a runtime error instead of a compile time error)
//
// this will be fixed by HIP PR#457
EIGEN_DEVICE_FUNC
diff --git a/unsupported/test/cxx11_tensor_concatenation.cpp b/unsupported/test/cxx11_tensor_concatenation.cpp
index e223d9ffd..bb9418d33 100644
--- a/unsupported/test/cxx11_tensor_concatenation.cpp
+++ b/unsupported/test/cxx11_tensor_concatenation.cpp
@@ -50,7 +50,7 @@ static void test_static_dimension_failure()
.reshape(Tensor<int, 3>::Dimensions(2, 3, 1))
.concatenate(right, 0);
Tensor<int, 2, DataLayout> alternative = left
- // Clang compiler break with {{{}}} with an ambigous error on copy constructor
+ // Clang compiler break with {{{}}} with an ambiguous error on copy constructor
// the variadic DSize constructor added for #ifndef EIGEN_EMULATE_CXX11_META_H.
// Solution:
// either the code should change to
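A standalone sketch (assumed names, not the test's DSizes type) of the constructor mix the comment alludes to, together with two unambiguous spellings that avoid the `{{{}}}` pattern:

```cpp
#include <array>
#include <cstddef>

// A stand-in for a DSizes-like type: it offers both a variadic index
// constructor and a constructor taking a braced std::array, the combination
// that can make nested-brace initializers ambiguous on some compilers.
template <std::size_t N>
struct DimsLike {
  std::array<int, N> values{};
  DimsLike() = default;
  explicit DimsLike(const std::array<int, N>& a) : values(a) {}
  template <typename... Ix>
  explicit DimsLike(Ix... ix) : values{{static_cast<int>(ix)...}} {}
};

int main() {
  DimsLike<3> from_ints(2, 3, 1);        // explicit variadic constructor call
  std::array<int, 3> dims = {{2, 3, 1}};
  DimsLike<3> from_array(dims);          // explicit array constructor call
  (void)from_ints; (void)from_array;
  return 0;
}
```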
diff --git a/unsupported/test/cxx11_tensor_executor.cpp b/unsupported/test/cxx11_tensor_executor.cpp
index aa789c2e4..18c87b35e 100644
--- a/unsupported/test/cxx11_tensor_executor.cpp
+++ b/unsupported/test/cxx11_tensor_executor.cpp
@@ -433,7 +433,7 @@ static void test_execute_slice_lvalue(Device d)
Tensor<T, NumDims, Options, Index> slice(slice_size);
slice.setRandom();
- // Asign a slice using default executor.
+ // Assign a slice using default executor.
Tensor<T, NumDims, Options, Index> golden = src;
golden.slice(slice_start, slice_size) = slice;
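A small usage sketch of the lvalue slice assignment exercised by this test, using the unsupported Tensor module's public API:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 2> dst(4, 4);
  dst.setZero();

  Eigen::Tensor<float, 2> patch(2, 2);
  patch.setConstant(1.0f);

  const Eigen::array<Eigen::Index, 2> offsets = {1, 1};
  const Eigen::array<Eigen::Index, 2> extents = {2, 2};

  // slice() on the left-hand side yields a writable view; assigning to it
  // overwrites only the selected 2x2 block of dst, as in the test's
  // golden.slice(slice_start, slice_size) = slice.
  dst.slice(offsets, extents) = patch;
  return 0;
}
```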