author    Eugene Zhulenev <ezhulenev@google.com>  2018-08-27 14:34:07 -0700
committer Eugene Zhulenev <ezhulenev@google.com>  2018-08-27 14:34:07 -0700
commit  c144bb355b74f4600156284e8202fcf9c0c135d8 (patch)
tree    3e35d145c624b544906a25a447e07104960cd77e /unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
parent  35d90e89600ff2524ec8bdd4ef4b95dd7c78b656 (diff)
parent  57472886764ff71ad45338c6538649f7a8fa3d0e (diff)
Merge with upstream eigen/default
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h | 17
1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index d9b61dc70..ba5ab1396 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -133,7 +133,7 @@ class TensorExecutor<Expression, DefaultDevice, Vectorizable,
     if (needs_assign) {
       // Size tensor blocks to fit in cache (or requested target block size).
       Index block_total_size = numext::mini(cache_size, total_size);
-      TensorBlockShapeType block_shape = TensorBlockShapeType::kSkewedInnerDims;
+      TensorBlockShapeType block_shape = kSkewedInnerDims;
       // Query expression tree for desired block size/shape.
       std::vector<TensorOpResourceRequirements> resources;
       evaluator.getResourceRequirements(&resources);
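
For context, this hunk touches the single-threaded tileable executor; user code reaches it simply by assigning a tensor expression on the default (host) device. A minimal sketch, with illustrative sizes and expression that are not part of this commit:

    // Sketch: evaluation on the DefaultDevice. With tiling enabled, the
    // executor above splits the output into cache-sized blocks and prefers
    // skewed inner dimensions (kSkewedInnerDims).
    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::Tensor<float, 2> a(512, 512), b(512, 512), c(512, 512);
      a.setRandom();
      b.setRandom();
      // Dispatched through TensorExecutor<..., DefaultDevice, ...>.
      c = a + b * 0.5f;
      return 0;
    }
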
@@ -229,12 +229,8 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, Tileable> {
     typedef EvalRange<Evaluator, StorageIndex, Vectorizable> EvalRange;
     Evaluator evaluator(expr, device);
-    const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr);
+    const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
     if (needs_assign) {
-      const StorageIndex PacketSize =
-          Vectorizable
-              ? unpacket_traits<typename Evaluator::PacketReturnType>::size
-              : 1;
       const StorageIndex size = array_prod(evaluator.dimensions());
       device.parallelFor(size, evaluator.costPerCoeff(Vectorizable),
                          EvalRange::alignBlockSize,
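
This non-tileable thread-pool path shards the flat coefficient range across threads via device.parallelFor, using costPerCoeff to guide block sizing. A hedged usage sketch; the pool size and tensor shapes are assumptions, not from this commit:

    #define EIGEN_USE_THREADS
    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::ThreadPool pool(4);                 // assumed pool of 4 threads
      Eigen::ThreadPoolDevice device(&pool, 4);

      Eigen::Tensor<float, 2> a(1024, 1024), b(1024, 1024), c(1024, 1024);
      a.setRandom();
      b.setRandom();

      // .device(...) routes the assignment through the ThreadPoolDevice
      // executor, which invokes device.parallelFor(size, cost,
      // EvalRange::alignBlockSize, ...) as in the hunk above.
      c.device(device) = a + b;
      return 0;
    }
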
@@ -259,12 +255,11 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, /*Tileable*/ tr
   static EIGEN_STRONG_INLINE void run(const Expression& expr,
                                       const ThreadPoolDevice& device) {
-    typedef TensorBlock<ScalarNoConst, StorageIndex, NumDims, Evaluator::Layout> TensorBlock;
     typedef TensorBlockMapper<ScalarNoConst, StorageIndex, NumDims, Evaluator::Layout> TensorBlockMapper;
     Evaluator evaluator(expr, device);
-    StorageIndex total_size = array_prod(evaluator.dimensions());
-    StorageIndex cache_size = device.firstLevelCacheSize() / sizeof(Scalar);
+    Index total_size = array_prod(evaluator.dimensions());
+    Index cache_size = device.firstLevelCacheSize() / sizeof(Scalar);
     if (total_size < cache_size) {
       // TODO(andydavis) Reduce block management overhead for small tensors.
       internal::TensorExecutor<Expression, ThreadPoolDevice, Vectorizable,
@@ -273,9 +268,9 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, /*Tileable*/ tr
       return;
     }
-    const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr);
+    const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
     if (needs_assign) {
-      TensorBlockShapeType block_shape = TensorBlockShapeType::kSkewedInnerDims;
+      TensorBlockShapeType block_shape = kSkewedInnerDims;
       Index block_total_size = 0;
       // Query expression tree for desired block size/shape.
       std::vector<internal::TensorOpResourceRequirements> resources;
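
The tileable thread-pool path above falls back to the non-tileable executor whenever the whole output fits in L1, per the total_size < cache_size test. A standalone illustration of that threshold arithmetic; the 32 KB L1 size is an assumption, where the real code queries device.firstLevelCacheSize():

    #include <cstddef>
    #include <iostream>

    int main() {
      const std::size_t l1_bytes = 32 * 1024;                   // assumed L1 data cache
      const std::size_t cache_size = l1_bytes / sizeof(float);  // capacity in coefficients
      const std::size_t total_size = 100 * 100;                 // output coefficients
      if (total_size < cache_size) {
        std::cout << "small tensor: evaluate directly, no blocking\n";
      } else {
        std::cout << "large tensor: tile into cache-sized blocks\n";
      }
      return 0;
    }
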