author    Gael Guennebaud <g.gael@free.fr>  2018-09-21 23:02:33 +0200
committer Gael Guennebaud <g.gael@free.fr>  2018-09-21 23:02:33 +0200
commit    c696dbcaa6e17cdfa6c9ff37dadf89cf4b707504 (patch)
tree      841add75dbe983f02568e9bff416c90eb905e050 /unsupported/Eigen/CXX11/src/Tensor
parent    e3c82890474fa3ab4b49a0c97b8b4eccce93a77a (diff)
Fix shadowing of last and all
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h   6
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h   12
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h           48
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h            40
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h        4
5 files changed, 55 insertions, 55 deletions
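
Why the renames below: Eigen's index-based slicing API (part of the 3.4 development branch this patch targets) defines Eigen::last and Eigen::all as ordinary symbols, so local variables named last or all shadow them and trigger -Wshadow warnings once the placeholders are in scope. A minimal sketch of the collision, assuming Eigen 3.4's slicing API; the function body and variable names are illustrative, not part of this patch:

    // Sketch only: shows how a local variable named "last" shadows Eigen's
    // slicing placeholder once "using namespace Eigen" brings it into scope.
    #include <Eigen/Dense>
    #include <iostream>

    using namespace Eigen;   // brings Eigen::last and Eigen::all into scope

    int main() {
      VectorXd v = VectorXd::LinSpaced(5, 0.0, 4.0);

      // Fine: the placeholder "last" names the final index of the vector.
      VectorXd tail = v(seq(1, last));
      std::cout << tail.transpose() << std::endl;

      // A local named "last" would shadow the placeholder and warn under
      // -Wshadow; renaming it, e.g. to lastIdx as in this patch, avoids that.
      Index lastIdx = v.size() - 1;
      std::cout << v(lastIdx) << std::endl;
      return 0;
    }
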
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
index dbb0f76bb..2d3b69128 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
@@ -255,7 +255,7 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar,
const IndexPair<Index> indexPair = this->computeIndexPair(i, j, packet_size - 1);
const Index first = indexPair.first;
- const Index last = indexPair.second;
+ const Index lastIdx = indexPair.second;
// We can always do optimized packet reads from left hand side right now, because
// the vertical matrix dimension on the left hand side is never contracting.
@@ -263,7 +263,7 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar,
// been shuffled first.
if (Tensor::PacketAccess &&
(side == Lhs || internal::array_size<contract_t>::value <= 1 || !inner_dim_reordered) &&
- (last - first) == (packet_size - 1)) {
+ (lastIdx - first) == (packet_size - 1)) {
return this->m_tensor.template packet<AlignmentType>(first);
}
@@ -276,7 +276,7 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar,
data[k] = this->m_tensor.coeff(internal_pair.first);
data[k + 1] = this->m_tensor.coeff(internal_pair.second);
}
- data[packet_size - 1] = this->m_tensor.coeff(last);
+ data[packet_size - 1] = this->m_tensor.coeff(lastIdx);
return pload<PacketT>(data);
}
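
The hunk above only renames last to lastIdx; the surrounding logic is the usual choice between a contiguous packet load (when the mapped indices are consecutive) and an element-wise gather. A minimal, Eigen-free sketch of that pattern, assuming a fixed 4-wide packet; the names are illustrative, not Eigen's internal API:

    #include <array>
    #include <cstddef>

    constexpr int kPacketSize = 4;
    using Packet = std::array<float, kPacketSize>;  // stand-in for a SIMD register

    // Load kPacketSize values starting at firstIdx when the mapped indices are
    // contiguous; otherwise gather them lane by lane into a stack buffer.
    Packet loadPacket(const float* data, std::size_t firstIdx, std::size_t lastIdx,
                      const std::size_t* mappedIdx /* per-lane indices for the gather path */) {
      Packet p{};
      if (lastIdx - firstIdx == kPacketSize - 1) {
        // Fast path: consecutive indices, one contiguous load suffices.
        for (int k = 0; k < kPacketSize; ++k) p[k] = data[firstIdx + k];
      } else {
        // Fallback: indices were reordered, gather lane by lane.
        for (int k = 0; k < kPacketSize; ++k) p[k] = data[mappedIdx[k]];
      }
      return p;
    }
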
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
index 6fc6688d3..1612c004b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
@@ -213,17 +213,17 @@ struct ThreadPoolDevice {
// block_count leaves that do actual computations.
Barrier barrier(static_cast<unsigned int>(block_count));
std::function<void(Index, Index)> handleRange;
- handleRange = [=, &handleRange, &barrier, &f](Index first, Index last) {
- if (last - first <= block_size) {
+ handleRange = [=, &handleRange, &barrier, &f](Index firstIdx, Index lastIdx) {
+ if (lastIdx - firstIdx <= block_size) {
// Single block or less, execute directly.
- f(first, last);
+ f(firstIdx, lastIdx);
barrier.Notify();
return;
}
// Split into halves and submit to the pool.
- Index mid = first + divup((last - first) / 2, block_size) * block_size;
- pool_->Schedule([=, &handleRange]() { handleRange(mid, last); });
- handleRange(first, mid);
+ Index mid = firstIdx + divup((lastIdx - firstIdx) / 2, block_size) * block_size;
+ pool_->Schedule([=, &handleRange]() { handleRange(mid, lastIdx); });
+ handleRange(firstIdx, mid);
};
handleRange(0, n);
barrier.Wait();
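
handleRange above is the heart of parallelFor: it splits [firstIdx, lastIdx) in half (rounded to a block boundary) until a range fits in one block, schedules one half on the pool, recurses into the other, and notifies the barrier once per leaf. A self-contained sketch of the same splitting scheme, using std::async in place of Eigen's thread pool and per-level joins in place of the single Barrier; names and sizes are illustrative:

    #include <functional>
    #include <future>
    #include <vector>

    using Index = long;

    static Index divup(Index x, Index y) { return (x + y - 1) / y; }

    // Apply f to [0, n) in chunks of at most block_size, recursively splitting
    // ranges in half and running the upper half asynchronously.
    void parallelForSketch(Index n, Index block_size,
                           const std::function<void(Index, Index)>& f) {
      std::function<void(Index, Index)> handleRange =
          [&](Index firstIdx, Index lastIdx) {
            if (lastIdx - firstIdx <= block_size) {
              f(firstIdx, lastIdx);  // leaf: run the block directly
              return;
            }
            // Round the split point to a block boundary, as in the patch above.
            Index mid = firstIdx + divup((lastIdx - firstIdx) / 2, block_size) * block_size;
            auto upper = std::async(std::launch::async,
                                    [&handleRange, mid, lastIdx] { handleRange(mid, lastIdx); });
            handleRange(firstIdx, mid);
            upper.wait();  // join the async half (Eigen instead notifies a Barrier per leaf)
          };
      handleRange(0, n);
    }

    int main() {
      std::vector<int> out(1000, 0);
      parallelForSketch(1000, 64, [&](Index a, Index b) {
        for (Index i = a; i < b; ++i) out[i] = 1;  // disjoint ranges, no data race
      });
      return 0;
    }
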
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index bfe1f97b8..1c44541bd 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -165,11 +165,11 @@ class TensorExecutor<Expression, DefaultDevice, Vectorizable,
#ifdef EIGEN_USE_THREADS
template <typename Evaluator, typename StorageIndex, bool Vectorizable>
struct EvalRange {
- static void run(Evaluator* evaluator_in, const StorageIndex first,
- const StorageIndex last) {
+ static void run(Evaluator* evaluator_in, const StorageIndex firstIdx,
+ const StorageIndex lastIdx) {
Evaluator evaluator = *evaluator_in;
- eigen_assert(last >= first);
- for (StorageIndex i = first; i < last; ++i) {
+ eigen_assert(lastIdx >= firstIdx);
+ for (StorageIndex i = firstIdx; i < lastIdx; ++i) {
evaluator.evalScalar(i);
}
}
@@ -182,14 +182,14 @@ struct EvalRange<Evaluator, StorageIndex, /*Vectorizable*/ true> {
static const int PacketSize =
unpacket_traits<typename Evaluator::PacketReturnType>::size;
- static void run(Evaluator* evaluator_in, const StorageIndex first,
- const StorageIndex last) {
+ static void run(Evaluator* evaluator_in, const StorageIndex firstIdx,
+ const StorageIndex lastIdx) {
Evaluator evaluator = *evaluator_in;
- eigen_assert(last >= first);
- StorageIndex i = first;
- if (last - first >= PacketSize) {
- eigen_assert(first % PacketSize == 0);
- StorageIndex last_chunk_offset = last - 4 * PacketSize;
+ eigen_assert(lastIdx >= firstIdx);
+ StorageIndex i = firstIdx;
+ if (lastIdx - firstIdx >= PacketSize) {
+ eigen_assert(firstIdx % PacketSize == 0);
+ StorageIndex last_chunk_offset = lastIdx - 4 * PacketSize;
// Give compiler a strong possibility to unroll the loop. But don't insist
// on unrolling, because if the function is expensive compiler should not
// unroll the loop at the expense of inlining.
@@ -198,12 +198,12 @@ struct EvalRange<Evaluator, StorageIndex, /*Vectorizable*/ true> {
evaluator.evalPacket(i + j * PacketSize);
}
}
- last_chunk_offset = last - PacketSize;
+ last_chunk_offset = lastIdx - PacketSize;
for (; i <= last_chunk_offset; i += PacketSize) {
evaluator.evalPacket(i);
}
}
- for (; i < last; ++i) {
+ for (; i < lastIdx; ++i) {
evaluator.evalScalar(i);
}
}
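
The vectorizable EvalRange walks [firstIdx, lastIdx) in three phases: groups of four packets (the comment above notes this only suggests unrolling to the compiler), the remaining whole packets, and a scalar tail. A small sketch of the same three-phase structure over a raw float array, assuming a 4-wide packet; purely illustrative:

    constexpr long kPacketSize = 4;

    // Fill dst[firstIdx, lastIdx) with value, mimicking the unrolled-packet /
    // packet / scalar split of the vectorized EvalRange above.
    void fillRange(float* dst, long firstIdx, long lastIdx, float value) {
      long i = firstIdx;
      if (lastIdx - firstIdx >= kPacketSize) {
        // Phase 1: groups of 4 packets, giving the compiler room to unroll.
        long last_chunk_offset = lastIdx - 4 * kPacketSize;
        for (; i <= last_chunk_offset; i += 4 * kPacketSize)
          for (long j = 0; j < 4; ++j)
            for (long k = 0; k < kPacketSize; ++k)
              dst[i + j * kPacketSize + k] = value;  // stands in for evalPacket
        // Phase 2: remaining whole packets.
        last_chunk_offset = lastIdx - kPacketSize;
        for (; i <= last_chunk_offset; i += kPacketSize)
          for (long k = 0; k < kPacketSize; ++k)
            dst[i + k] = value;
      }
      // Phase 3: scalar tail.
      for (; i < lastIdx; ++i)
        dst[i] = value;  // stands in for evalScalar
    }
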
@@ -234,8 +234,8 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, Tileable> {
const StorageIndex size = array_prod(evaluator.dimensions());
device.parallelFor(size, evaluator.costPerCoeff(Vectorizable),
EvalRange::alignBlockSize,
- [&evaluator](StorageIndex first, StorageIndex last) {
- EvalRange::run(&evaluator, first, last);
+ [&evaluator](StorageIndex firstIdx, StorageIndex lastIdx) {
+ EvalRange::run(&evaluator, firstIdx, lastIdx);
});
}
evaluator.cleanup();
@@ -292,8 +292,8 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, /*Tileable*/ tr
void* buf = device.allocate((num_threads + 1) * aligned_blocksize);
device.parallelFor(
block_mapper.total_block_count(), cost * block_size,
- [=, &device, &evaluator, &block_mapper](StorageIndex first,
- StorageIndex last) {
+ [=, &device, &evaluator, &block_mapper](StorageIndex firstIdx,
+ StorageIndex lastIdx) {
// currentThreadId() returns -1 if called from a thread not in the
// thread pool, such as the main thread dispatching Eigen
// expressions.
@@ -301,7 +301,7 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, /*Tileable*/ tr
eigen_assert(thread_idx >= -1 && thread_idx < num_threads);
Scalar* thread_buf = reinterpret_cast<Scalar*>(
static_cast<char*>(buf) + aligned_blocksize * (thread_idx + 1));
- for (StorageIndex i = first; i < last; ++i) {
+ for (StorageIndex i = firstIdx; i < lastIdx; ++i) {
auto block = block_mapper.GetBlockForIndex(i, thread_buf);
evaluator.evalBlock(&block);
}
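
The lambda above needs one scratch buffer per worker; currentThreadId() returns -1 when the caller is not a pool thread, so the buffer is chosen with thread_idx + 1 out of num_threads + 1 slots, reserving slot 0 for the dispatching thread. A minimal sketch of that slot arithmetic; the helper name is hypothetical:

    #include <cassert>
    #include <cstddef>

    // Pick the scratch slot for the running worker.  thread_idx == -1 means the
    // caller is not a pool thread (e.g. the main thread), which uses slot 0;
    // pool thread k uses slot k + 1.
    char* threadScratch(char* buf, std::size_t aligned_blocksize,
                        int thread_idx, int num_threads) {
      assert(thread_idx >= -1 && thread_idx < num_threads);
      return buf + aligned_blocksize * static_cast<std::size_t>(thread_idx + 1);
    }
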
@@ -330,8 +330,8 @@ class TensorExecutor<Expression, GpuDevice, Vectorizable, Tileable> {
template <typename Evaluator, typename StorageIndex, bool Vectorizable>
struct EigenMetaKernelEval {
static __device__ EIGEN_ALWAYS_INLINE
- void run(Evaluator& eval, StorageIndex first, StorageIndex last, StorageIndex step_size) {
- for (StorageIndex i = first; i < last; i += step_size) {
+ void run(Evaluator& eval, StorageIndex firstIdx, StorageIndex lastIdx, StorageIndex step_size) {
+ for (StorageIndex i = firstIdx; i < lastIdx; i += step_size) {
eval.evalScalar(i);
}
}
@@ -340,17 +340,17 @@ struct EigenMetaKernelEval {
template <typename Evaluator, typename StorageIndex>
struct EigenMetaKernelEval<Evaluator, StorageIndex, true> {
static __device__ EIGEN_ALWAYS_INLINE
- void run(Evaluator& eval, StorageIndex first, StorageIndex last, StorageIndex step_size) {
+ void run(Evaluator& eval, StorageIndex firstIdx, StorageIndex lastIdx, StorageIndex step_size) {
const StorageIndex PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
- const StorageIndex vectorized_size = (last / PacketSize) * PacketSize;
+ const StorageIndex vectorized_size = (lastIdx / PacketSize) * PacketSize;
const StorageIndex vectorized_step_size = step_size * PacketSize;
// Use the vector path
- for (StorageIndex i = first * PacketSize; i < vectorized_size;
+ for (StorageIndex i = firstIdx * PacketSize; i < vectorized_size;
i += vectorized_step_size) {
eval.evalPacket(i);
}
- for (StorageIndex i = vectorized_size + first; i < last; i += step_size) {
+ for (StorageIndex i = vectorized_size + firstIdx; i < lastIdx; i += step_size) {
eval.evalScalar(i);
}
}
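
In the GPU path each thread is launched with firstIdx equal to its global thread index and step_size equal to the total number of threads, so the first loop above is a grid-stride loop over whole packets and the second loop cooperatively covers the scalar tail past the last full packet. A host-side C++ sketch that replays the same index pattern, with the CUDA launch simulated by iterating over thread ids; sizes are illustrative:

    #include <cassert>
    #include <vector>

    constexpr long kPacketSize = 4;

    // Replay the per-thread index pattern of the vectorized kernel above:
    // grid-stride over whole packets, then a grid-stride scalar tail.
    void evalOnDeviceSim(std::vector<int>& hits, long num_threads) {
      const long n = static_cast<long>(hits.size());
      const long vectorized_size = (n / kPacketSize) * kPacketSize;
      for (long firstIdx = 0; firstIdx < num_threads; ++firstIdx) {  // one pass per "thread"
        const long step_size = num_threads;
        for (long i = firstIdx * kPacketSize; i < vectorized_size; i += step_size * kPacketSize)
          for (long k = 0; k < kPacketSize; ++k)
            ++hits[i + k];                                           // stands in for evalPacket
        for (long i = vectorized_size + firstIdx; i < n; i += step_size)
          ++hits[i];                                                 // stands in for evalScalar
      }
    }

    int main() {
      std::vector<int> hits(37, 0);
      evalOnDeviceSim(hits, 8);
      for (int h : hits) assert(h == 1);  // every coefficient visited exactly once
      return 0;
    }
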
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h b/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h
index 59c1704ed..4837f2200 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h
@@ -273,21 +273,21 @@ struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device
const Index initialIndex = index;
Index inputIndex = 0;
for (int i = NumDims - 1; i > 0; --i) {
- const Index first = index;
- const Index last = index + PacketSize - 1;
+ const Index firstIdx = index;
+ const Index lastIdx = index + PacketSize - 1;
const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i];
const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i];
const Index lastPaddedRight = m_outputStrides[i+1];
- if (!isLeftPaddingCompileTimeZero(i) && last < lastPaddedLeft) {
+ if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) {
// all the coefficient are in the padding zone.
return internal::pset1<PacketReturnType>(m_paddingValue);
}
- else if (!isRightPaddingCompileTimeZero(i) && first >= firstPaddedRight && last < lastPaddedRight) {
+ else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
// all the coefficient are in the padding zone.
return internal::pset1<PacketReturnType>(m_paddingValue);
}
- else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+ else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
// all the coefficient are between the 2 padding zones.
const Index idx = index / m_outputStrides[i];
inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
@@ -299,21 +299,21 @@ struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device
}
}
- const Index last = index + PacketSize - 1;
- const Index first = index;
+ const Index lastIdx = index + PacketSize - 1;
+ const Index firstIdx = index;
const Index lastPaddedLeft = m_padding[0].first;
const Index firstPaddedRight = (m_dimensions[0] - m_padding[0].second);
const Index lastPaddedRight = m_outputStrides[1];
- if (!isLeftPaddingCompileTimeZero(0) && last < lastPaddedLeft) {
+ if (!isLeftPaddingCompileTimeZero(0) && lastIdx < lastPaddedLeft) {
// all the coefficient are in the padding zone.
return internal::pset1<PacketReturnType>(m_paddingValue);
}
- else if (!isRightPaddingCompileTimeZero(0) && first >= firstPaddedRight && last < lastPaddedRight) {
+ else if (!isRightPaddingCompileTimeZero(0) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
// all the coefficient are in the padding zone.
return internal::pset1<PacketReturnType>(m_paddingValue);
}
- else if ((isLeftPaddingCompileTimeZero(0) && isRightPaddingCompileTimeZero(0)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+ else if ((isLeftPaddingCompileTimeZero(0) && isRightPaddingCompileTimeZero(0)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
// all the coefficient are between the 2 padding zones.
inputIndex += (index - m_padding[0].first);
return m_impl.template packet<Unaligned>(inputIndex);
@@ -331,21 +331,21 @@ struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device
Index inputIndex = 0;
for (int i = 0; i < NumDims - 1; ++i) {
- const Index first = index;
- const Index last = index + PacketSize - 1;
+ const Index firstIdx = index;
+ const Index lastIdx = index + PacketSize - 1;
const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i+1];
const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i+1];
const Index lastPaddedRight = m_outputStrides[i];
- if (!isLeftPaddingCompileTimeZero(i) && last < lastPaddedLeft) {
+ if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) {
// all the coefficient are in the padding zone.
return internal::pset1<PacketReturnType>(m_paddingValue);
}
- else if (!isRightPaddingCompileTimeZero(i) && first >= firstPaddedRight && last < lastPaddedRight) {
+ else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
// all the coefficient are in the padding zone.
return internal::pset1<PacketReturnType>(m_paddingValue);
}
- else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+ else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
// all the coefficient are between the 2 padding zones.
const Index idx = index / m_outputStrides[i+1];
inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
@@ -357,21 +357,21 @@ struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device
}
}
- const Index last = index + PacketSize - 1;
- const Index first = index;
+ const Index lastIdx = index + PacketSize - 1;
+ const Index firstIdx = index;
const Index lastPaddedLeft = m_padding[NumDims-1].first;
const Index firstPaddedRight = (m_dimensions[NumDims-1] - m_padding[NumDims-1].second);
const Index lastPaddedRight = m_outputStrides[NumDims-1];
- if (!isLeftPaddingCompileTimeZero(NumDims-1) && last < lastPaddedLeft) {
+ if (!isLeftPaddingCompileTimeZero(NumDims-1) && lastIdx < lastPaddedLeft) {
// all the coefficient are in the padding zone.
return internal::pset1<PacketReturnType>(m_paddingValue);
}
- else if (!isRightPaddingCompileTimeZero(NumDims-1) && first >= firstPaddedRight && last < lastPaddedRight) {
+ else if (!isRightPaddingCompileTimeZero(NumDims-1) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
// all the coefficient are in the padding zone.
return internal::pset1<PacketReturnType>(m_paddingValue);
}
- else if ((isLeftPaddingCompileTimeZero(NumDims-1) && isRightPaddingCompileTimeZero(NumDims-1)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+ else if ((isLeftPaddingCompileTimeZero(NumDims-1) && isRightPaddingCompileTimeZero(NumDims-1)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
// all the coefficient are between the 2 padding zones.
inputIndex += (index - m_padding[NumDims-1].first);
return m_impl.template packet<Unaligned>(inputIndex);
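
For each dimension, the packet path above compares the span [firstIdx, lastIdx] (inclusive) against three boundaries (lastPaddedLeft, firstPaddedRight, lastPaddedRight): entirely in the left pad or the right pad yields a packet of the padding value, entirely inside the data allows a plain packet read, and anything straddling a boundary falls through to coefficient-wise reads. A one-dimensional sketch of that classification, with strides omitted; the names and enum are illustrative, not Eigen's:

    enum class PadSpan { AllLeftPad, AllRightPad, AllData, Mixed };

    // Classify the packet span [firstIdx, lastIdx] (inclusive) against a padded
    // dimension of dim entries with padLeft / padRight padded entries per side.
    PadSpan classify(long firstIdx, long lastIdx,
                     long dim, long padLeft, long padRight) {
      const long lastPaddedLeft   = padLeft;         // first non-padded entry
      const long firstPaddedRight = dim - padRight;  // first right-padded entry
      const long lastPaddedRight  = dim;             // one past the last entry
      if (lastIdx < lastPaddedLeft)
        return PadSpan::AllLeftPad;                  // whole packet is the padding value
      if (firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight)
        return PadSpan::AllRightPad;                 // whole packet is the padding value
      if (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)
        return PadSpan::AllData;                     // plain packet load of the input works
      return PadSpan::Mixed;                         // fall back to per-coefficient reads
    }
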
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
index 7504c1598..88940e6e6 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
@@ -208,8 +208,8 @@ __global__ void ReductionInitFullReduxKernelHalfFloat(Reducer reducer, const Sel
eigen_assert(blockDim.x == 1);
eigen_assert(gridDim.x == 1);
if (num_coeffs % 2 != 0) {
- half last = input.m_impl.coeff(num_coeffs-1);
- *scratch = __halves2half2(last, reducer.initialize());
+ half lastCoeff = input.m_impl.coeff(num_coeffs-1);
+ *scratch = __halves2half2(lastCoeff, reducer.initialize());
} else {
*scratch = reducer.template initializePacket<half2>();
}
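
With half2 packets the full reduction proceeds pairwise, so when num_coeffs is odd the scratch packet is seeded with the one unpaired coefficient in the low half and the reducer's identity in the high half; otherwise both halves start at the identity. A plain C++ sketch of that seeding, with a float pair standing in for half2 and a sum reducer assumed for the identity; illustrative only:

    #include <utility>

    using Pair = std::pair<float, float>;  // stand-in for a half2 packet

    // Seed the scratch packet for a pairwise sum-reduction over num_coeffs values.
    // Odd count: pack the unpaired last coefficient with the identity (0 for sum)
    // so the subsequent pairwise accumulation covers every coefficient exactly once.
    Pair initScratch(const float* data, long num_coeffs) {
      const float identity = 0.0f;  // sum-reducer identity
      if (num_coeffs % 2 != 0) {
        const float lastCoeff = data[num_coeffs - 1];
        return {lastCoeff, identity};
      }
      return {identity, identity};
    }
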