From c696dbcaa6e17cdfa6c9ff37dadf89cf4b707504 Mon Sep 17 00:00:00 2001
From: Gael Guennebaud
Date: Fri, 21 Sep 2018 23:02:33 +0200
Subject: Fix shadowing of last and all

---
 .../Eigen/CXX11/src/Tensor/TensorExecutor.h        | 48 +++++++++++-----------
 1 file changed, 24 insertions(+), 24 deletions(-)

(limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h')

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index bfe1f97b8..1c44541bd 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -165,11 +165,11 @@ class TensorExecutor<Expression, DefaultDevice, Vectorizable,
 #ifdef EIGEN_USE_THREADS
 template <typename Evaluator, typename StorageIndex, bool Vectorizable>
 struct EvalRange {
-  static void run(Evaluator* evaluator_in, const StorageIndex first,
-                  const StorageIndex last) {
+  static void run(Evaluator* evaluator_in, const StorageIndex firstIdx,
+                  const StorageIndex lastIdx) {
     Evaluator evaluator = *evaluator_in;
-    eigen_assert(last >= first);
-    for (StorageIndex i = first; i < last; ++i) {
+    eigen_assert(lastIdx >= firstIdx);
+    for (StorageIndex i = firstIdx; i < lastIdx; ++i) {
       evaluator.evalScalar(i);
     }
   }
@@ -182,14 +182,14 @@ struct EvalRange<Evaluator, StorageIndex, /*Vectorizable*/ true> {
   static const int PacketSize =
       unpacket_traits<typename Evaluator::PacketReturnType>::size;
 
-  static void run(Evaluator* evaluator_in, const StorageIndex first,
-                  const StorageIndex last) {
+  static void run(Evaluator* evaluator_in, const StorageIndex firstIdx,
+                  const StorageIndex lastIdx) {
     Evaluator evaluator = *evaluator_in;
-    eigen_assert(last >= first);
-    StorageIndex i = first;
-    if (last - first >= PacketSize) {
-      eigen_assert(first % PacketSize == 0);
-      StorageIndex last_chunk_offset = last - 4 * PacketSize;
+    eigen_assert(lastIdx >= firstIdx);
+    StorageIndex i = firstIdx;
+    if (lastIdx - firstIdx >= PacketSize) {
+      eigen_assert(firstIdx % PacketSize == 0);
+      StorageIndex last_chunk_offset = lastIdx - 4 * PacketSize;
       // Give compiler a strong possibility to unroll the loop. But don't insist
       // on unrolling, because if the function is expensive compiler should not
       // unroll the loop at the expense of inlining.
@@ -198,12 +198,12 @@ struct EvalRange<Evaluator, StorageIndex, /*Vectorizable*/ true> {
           evaluator.evalPacket(i + j * PacketSize);
         }
       }
-      last_chunk_offset = last - PacketSize;
+      last_chunk_offset = lastIdx - PacketSize;
       for (; i <= last_chunk_offset; i += PacketSize) {
         evaluator.evalPacket(i);
       }
     }
-    for (; i < last; ++i) {
+    for (; i < lastIdx; ++i) {
       evaluator.evalScalar(i);
     }
   }
@@ -234,8 +234,8 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, Tileable> {
       const StorageIndex size = array_prod(evaluator.dimensions());
       device.parallelFor(size, evaluator.costPerCoeff(Vectorizable),
                          EvalRange::alignBlockSize,
-                         [&evaluator](StorageIndex first, StorageIndex last) {
-                           EvalRange::run(&evaluator, first, last);
+                         [&evaluator](StorageIndex firstIdx, StorageIndex lastIdx) {
+                           EvalRange::run(&evaluator, firstIdx, lastIdx);
                          });
     }
     evaluator.cleanup();
@@ -292,8 +292,8 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable,
 
     device.parallelFor(
         block_mapper.total_block_count(), cost * block_size,
-        [=, &device, &evaluator](StorageIndex first,
-                                 StorageIndex last) {
+        [=, &device, &evaluator](StorageIndex firstIdx,
+                                 StorageIndex lastIdx) {
           // currentThreadId() returns -1 if called from a thread not in the
           // thread pool, such as the main thread dispatching Eigen
          // expressions.
@@ -301,7 +301,7 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable,
           eigen_assert(thread_idx >= -1 && thread_idx < num_threads);
           Scalar* thread_buf = reinterpret_cast<Scalar*>(
               static_cast<char*>(buf) + aligned_blocksize * (thread_idx + 1));
-          for (StorageIndex i = first; i < last; ++i) {
+          for (StorageIndex i = firstIdx; i < lastIdx; ++i) {
             auto block = block_mapper.GetBlockForIndex(i, thread_buf);
             evaluator.evalBlock(&block);
           }
@@ -330,8 +330,8 @@ class TensorExecutor<Expression, GpuDevice, Vectorizable, Tileable> {
 template <typename Evaluator, typename StorageIndex, bool Vectorizable>
 struct EigenMetaKernelEval {
   static __device__ EIGEN_ALWAYS_INLINE
-  void run(Evaluator& eval, StorageIndex first, StorageIndex last, StorageIndex step_size) {
-    for (StorageIndex i = first; i < last; i += step_size) {
+  void run(Evaluator& eval, StorageIndex firstIdx, StorageIndex lastIdx, StorageIndex step_size) {
+    for (StorageIndex i = firstIdx; i < lastIdx; i += step_size) {
       eval.evalScalar(i);
     }
   }
@@ -340,17 +340,17 @@ struct EigenMetaKernelEval {
 template <typename Evaluator, typename StorageIndex>
 struct EigenMetaKernelEval<Evaluator, StorageIndex, /*Vectorizable*/ true> {
   static __device__ EIGEN_ALWAYS_INLINE
-  void run(Evaluator& eval, StorageIndex first, StorageIndex last, StorageIndex step_size) {
+  void run(Evaluator& eval, StorageIndex firstIdx, StorageIndex lastIdx, StorageIndex step_size) {
     const StorageIndex PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
-    const StorageIndex vectorized_size = (last / PacketSize) * PacketSize;
+    const StorageIndex vectorized_size = (lastIdx / PacketSize) * PacketSize;
     const StorageIndex vectorized_step_size = step_size * PacketSize;
 
     // Use the vector path
-    for (StorageIndex i = first * PacketSize; i < vectorized_size;
+    for (StorageIndex i = firstIdx * PacketSize; i < vectorized_size;
          i += vectorized_step_size) {
       eval.evalPacket(i);
     }
-    for (StorageIndex i = vectorized_size + first; i < last; i += step_size) {
+    for (StorageIndex i = vectorized_size + firstIdx; i < lastIdx; i += step_size) {
      eval.evalScalar(i);
     }
   }
--
cgit v1.2.3
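
Context for the rename: the Tensor code above lives in namespace Eigen, so function
parameters named `first` and `last` shadow the Eigen::last indexing placeholder (and,
in the other files touched by this commit, Eigen::all) added for the indexed-view API,
which can also trigger -Wshadow warnings. A minimal sketch of the collision, assuming
the Eigen 3.4 indexing API; `tail3` is illustrative only and not part of the patch:

    #include <Eigen/Dense>
    using Eigen::last;  // symbolic placeholder for "index of the last element"
    using Eigen::seq;

    // Fine: `last` resolves to the placeholder, i.e. v.size() - 1 here.
    Eigen::VectorXd tail3(const Eigen::VectorXd& v) {
      return v(seq(last - 2, last));
    }

    // But in a scope that declares an integer parameter named `last`, as the
    // old EvalRange::run did, the placeholder is shadowed: seq(last - 2, last)
    // would bind to the integer instead. Renaming the parameter to lastIdx,
    // as this patch does, removes the ambiguity.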