author     Eugene Zhulenev <ezhulenev@google.com>  2018-07-31 15:56:31 -0700
committer  Eugene Zhulenev <ezhulenev@google.com>  2018-07-31 15:56:31 -0700
commit     83c0a16baf5ecac6288cd9b74536a82de8985b31 (patch)
tree       fc284a18d1a457eecfb2ce4aeda26bd0c78f2056 /unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
parent     d6568425f8b384ef36da1f16be76a3977fb90846 (diff)
Add block evaluation support to TensorOps
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h | 14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
index 84cf6d216..33c4ef5b7 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
@@ -214,7 +214,7 @@ class TensorBlockIO {
num_size_one_inner_dims, NumDims - num_size_one_inner_dims - 1);
const StorageIndex block_dim_for_tensor_stride1_dim =
NumDims == 0 ? 1 : tensor_to_block_dim_map[tensor_stride1_dim];
- size_t block_inner_dim_size =
+ Index block_inner_dim_size =
NumDims == 0 ? 1
: block.block_sizes()[block_dim_for_tensor_stride1_dim];
for (int i = num_size_one_inner_dims + 1; i < NumDims; ++i) {
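The hunk above retypes block_inner_dim_size from size_t to Eigen's signed Index. One motivation, visible in the surrounding context line NumDims - num_size_one_inner_dims - 1, is that such arithmetic can legitimately go negative; with an unsigned type it silently wraps instead. A minimal standalone sketch of the difference, using std::ptrdiff_t as a stand-in for Eigen::Index and made-up values, not Eigen's actual code:

#include <cstddef>
#include <cstdio>

using Index = std::ptrdiff_t;  // stand-in for Eigen::Index (signed)

int main() {
  // Hypothetical values where the subtraction goes below zero.
  Index num_dims = 1;
  Index num_size_one_inner_dims = 1;
  // Signed Index yields -1, a value the surrounding code can test for.
  Index signed_result = num_dims - num_size_one_inner_dims - 1;
  // The same expression in size_t wraps around to SIZE_MAX instead.
  std::size_t unsigned_result =
      static_cast<std::size_t>(num_dims) - num_size_one_inner_dims - 1;
  std::printf("signed: %td  unsigned: %zu\n", signed_result, unsigned_result);
  return signed_result < 0 ? 0 : 1;  // exits 0: the signed form is testable
}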
@@ -745,16 +745,15 @@ class TensorBlockMapper {
if (block_shape == TensorBlockShapeType::kUniformAllDims) {
// Tensor will not fit within 'min_target_size' budget: calculate tensor
// block dimension sizes based on "square" dimension size target.
- const size_t dim_size_target = static_cast<const size_t>(
+ const Index dim_size_target = static_cast<Index>(
std::pow(static_cast<float>(min_target_size),
1.0 / static_cast<float>(block_dim_sizes.rank())));
- for (size_t i = 0; i < block_dim_sizes.rank(); ++i) {
+ for (Index i = 0; i < block_dim_sizes.rank(); ++i) {
// TODO(andydavis) Adjust the inner most 'block_dim_size' to make it
// a multiple of the packet size. Note that reducing
// 'block_dim_size' in this manner can increase the number of
// blocks, and so will amplify any per-block overhead.
- block_dim_sizes[i] = numext::mini(
- dim_size_target, static_cast<size_t>(tensor_dims[i]));
+ block_dim_sizes[i] = numext::mini(dim_size_target, tensor_dims[i]);
}
// Add any un-allocated coefficients to inner dimension(s).
StorageIndex total_size = block_dim_sizes.TotalSize();
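For reference, the kUniformAllDims branch patched above aims for roughly "square" blocks: each dimension targets the rank-th root of the min_target_size coefficient budget, clamped to the tensor's actual extent. Below is a hedged standalone sketch of that heuristic; UniformBlockSizes is our own name, std::vector stands in for Eigen's dimension containers, and the follow-up step that redistributes unallocated coefficients to the inner dimensions is omitted:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

using Index = std::ptrdiff_t;  // stand-in for Eigen::Index

// Clamp each block dimension to ~min_target_size^(1/rank), mirroring the
// patched loop; both min() operands are Index, so no casts are needed.
std::vector<Index> UniformBlockSizes(const std::vector<Index>& tensor_dims,
                                     Index min_target_size) {
  const Index rank = static_cast<Index>(tensor_dims.size());
  const Index dim_size_target = static_cast<Index>(
      std::pow(static_cast<float>(min_target_size),
               1.0 / static_cast<float>(rank)));
  std::vector<Index> block_dim_sizes(tensor_dims.size());
  for (Index i = 0; i < rank; ++i)
    block_dim_sizes[i] = std::min(dim_size_target, tensor_dims[i]);
  return block_dim_sizes;
}

int main() {
  // A 100x100x100 tensor with a 1000-coefficient budget yields blocks of
  // roughly 10x10x10 (exact values depend on float rounding in std::pow).
  for (Index s : UniformBlockSizes({100, 100, 100}, 1000))
    std::printf("%td ", s);
  std::printf("\n");
  return 0;
}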
@@ -789,9 +788,8 @@ class TensorBlockMapper {
}
}
- eigen_assert(
- block_dim_sizes.TotalSize() >=
- numext::mini<size_t>(min_target_size, tensor_dims.TotalSize()));
+ eigen_assert(block_dim_sizes.TotalSize() >=
+ numext::mini<Index>(min_target_size, tensor_dims.TotalSize()));
return block_dim_sizes;
}
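Finally, the rewritten eigen_assert spells out numext::mini<Index> explicitly. numext::mini takes a single template parameter for both operands, so when min_target_size (now Index) and TotalSize() have different integer types, deduction has no consistent type to pick; naming the type converts both sides. A small imitation of that shape (mini below only mimics the one-parameter signature; it is not Eigen's implementation):

#include <cstddef>

using Index = std::ptrdiff_t;  // stand-in for Eigen::Index

// numext::mini-style helper: one template parameter for both operands.
template <typename T>
T mini(const T& a, const T& b) { return a < b ? a : b; }

int main() {
  Index min_target_size = 1024;
  long long total_size = 100LL * 100 * 100;  // stand-in for TotalSize()
  // mini(min_target_size, total_size);  // ill-formed where Index and
  //                                     // long long are distinct types
  Index bound = mini<Index>(min_target_size, total_size);  // explicit T
  return bound == 1024 ? 0 : 1;  // exits 0: min(1024, 1000000) == 1024
}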