diff options
author | Eugene Zhulenev <ezhulenev@google.com> | 2019-06-28 11:13:44 -0700 |
---|---|---|
committer | Eugene Zhulenev <ezhulenev@google.com> | 2019-06-28 11:13:44 -0700 |
commit | 878845cb25c1ba9e56883fd0654eafb55a22fc34 (patch) | |
tree | 848fdcee1dc377feee2ef45495b3ad21839d0244 /unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h | |
parent | 16a56b2dddbfaf2d4b81d62be5e3139f12783ac8 (diff) |
Add block access to TensorReverseOp and make sure that TensorForcedEval uses block access when preferred
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h')
-rw-r--r-- | unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h | 2 |
1 file changed, 1 insertion, 1 deletion
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h index 7b5842571..647c98d4e 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h @@ -346,7 +346,7 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, /*Tileable*/ tr // expressions. const int thread_idx = device.currentThreadId(); eigen_assert(thread_idx >= -1 && thread_idx < num_threads); - Scalar* thread_buf = reinterpret_cast<Scalar*>( + ScalarNoConst* thread_buf = reinterpret_cast<ScalarNoConst*>( static_cast<char*>(buf) + aligned_blocksize * (thread_idx + 1)); for (StorageIndex i = firstIdx; i < lastIdx; ++i) { auto block = block_mapper.GetBlockForIndex(i, thread_buf); |