Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h | 36 +++++++++++++++++-------------------
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
index 45ddfdb39..877603421 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
@@ -152,11 +152,11 @@ struct TensorBlockCopyOp {
     const Scalar* src_base = &src_data[src_index];
     Scalar* dst_base = &dst_data[dst_index];

-    using Src = const Eigen::Array<Scalar, Dynamic, 1>;
-    using Dst = Eigen::Array<Scalar, Dynamic, 1>;
+    typedef const Eigen::Array<Scalar, Dynamic, 1> Src;
+    typedef Eigen::Array<Scalar, Dynamic, 1> Dst;

-    using SrcMap = Eigen::Map<Src, 0, InnerStride<>>;
-    using DstMap = Eigen::Map<Dst, 0, InnerStride<>>;
+    typedef Eigen::Map<Src, 0, InnerStride<>> SrcMap;
+    typedef Eigen::Map<Dst, 0, InnerStride<>> DstMap;

     const SrcMap src(src_base, num_coeff_to_copy, InnerStride<>(src_stride));
     DstMap dst(dst_base, num_coeff_to_copy, InnerStride<>(dst_stride));
@@ -214,7 +214,7 @@ class TensorBlockIO {
         num_size_one_inner_dims, NumDims - num_size_one_inner_dims - 1);
     const StorageIndex block_dim_for_tensor_stride1_dim =
         NumDims == 0 ? 1 : tensor_to_block_dim_map[tensor_stride1_dim];
-    size_t block_inner_dim_size =
+    Index block_inner_dim_size =
         NumDims == 0 ? 1
                      : block.block_sizes()[block_dim_for_tensor_stride1_dim];
     for (int i = num_size_one_inner_dims + 1; i < NumDims; ++i) {
@@ -401,13 +401,13 @@ struct TensorBlockCwiseBinaryOp {
       const StorageIndex left_stride, const LeftScalar* left_data,
       const StorageIndex right_index, const StorageIndex right_stride,
       const RightScalar* right_data) {
-    using Lhs = const Eigen::Array<LeftScalar, Dynamic, 1>;
-    using Rhs = const Eigen::Array<RightScalar, Dynamic, 1>;
-    using Out = Eigen::Array<OutputScalar, Dynamic, 1>;
+    typedef const Eigen::Array<LeftScalar, Dynamic, 1> Lhs;
+    typedef const Eigen::Array<RightScalar, Dynamic, 1> Rhs;
+    typedef Eigen::Array<OutputScalar, Dynamic, 1> Out;

-    using LhsMap = Eigen::Map<Lhs, 0, InnerStride<>>;
-    using RhsMap = Eigen::Map<Rhs, 0, InnerStride<>>;
-    using OutMap = Eigen::Map<Out, 0, InnerStride<>>;
+    typedef Eigen::Map<Lhs, 0, InnerStride<>> LhsMap;
+    typedef Eigen::Map<Rhs, 0, InnerStride<>> RhsMap;
+    typedef Eigen::Map<Out, 0, InnerStride<>> OutMap;

     const LeftScalar* lhs_base = &left_data[left_index];
     const RightScalar* rhs_base = &right_data[right_index];
@@ -745,16 +745,15 @@ class TensorBlockMapper {
     if (block_shape == TensorBlockShapeType::kUniformAllDims) {
       // Tensor will not fit within 'min_target_size' budget: calculate tensor
       // block dimension sizes based on "square" dimension size target.
-      const size_t dim_size_target = static_cast<const size_t>(
+      const Index dim_size_target = static_cast<Index>(
           std::pow(static_cast<float>(min_target_size),
-                   1.0f / static_cast<float>(block_dim_sizes.rank())));
-      for (size_t i = 0; i < block_dim_sizes.rank(); ++i) {
+                   1.0 / static_cast<float>(block_dim_sizes.rank())));
+      for (Index i = 0; i < block_dim_sizes.rank(); ++i) {
         // TODO(andydavis) Adjust the inner most 'block_dim_size' to make it
         // a multiple of the packet size. Note that reducing
         // 'block_dim_size' in this manner can increase the number of
         // blocks, and so will amplify any per-block overhead.
-        block_dim_sizes[i] = numext::mini(
-            dim_size_target, static_cast<size_t>(tensor_dims[i]));
+        block_dim_sizes[i] = numext::mini(dim_size_target, tensor_dims[i]);
       }
       // Add any un-allocated coefficients to inner dimension(s).
       StorageIndex total_size = block_dim_sizes.TotalSize();
@@ -789,9 +788,8 @@ class TensorBlockMapper {
       }
     }

-    eigen_assert(
-        block_dim_sizes.TotalSize() >=
-        numext::mini<size_t>(min_target_size, tensor_dims.TotalSize()));
+    eigen_assert(block_dim_sizes.TotalSize() >=
+                 numext::mini<Index>(min_target_size, tensor_dims.TotalSize()));
     return block_dim_sizes;
   }
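
For context (an editor's addition, not part of the upstream commit): the patch makes two mechanical substitutions. C++11 'using' aliases become 'typedef' declarations, presumably for compilers with incomplete C++11 support (the commit message is not shown here), and unsigned 'size_t' becomes Eigen's signed 'Index' in the block-size arithmetic, which removes the signed/unsigned mixing in comparisons like 'numext::mini'. The sketch below is a minimal standalone illustration of why both substitutions are behavior-preserving; it assumes Eigen >= 3.3 for 'Eigen::Index', and the names 'SrcMapAlias', 'dim_size_target', and 'tensor_dim' are hypothetical, not code from this file.

    // Minimal sketch (hypothetical, not from the patch). Compile with
    // -std=c++11 and the Eigen headers on the include path.
    #include <type_traits>
    #include <Eigen/Core>

    // 1) A typedef and an alias declaration introduce the identical type,
    //    so swapping `using X = T;` for `typedef T X;` is purely syntactic.
    typedef Eigen::Map<const Eigen::Array<float, Eigen::Dynamic, 1>, 0,
                       Eigen::InnerStride<> > SrcMap;          // typedef form
    using SrcMapAlias = Eigen::Map<const Eigen::Array<float, Eigen::Dynamic, 1>,
                                   0, Eigen::InnerStride<> >;  // alias form
    static_assert(std::is_same<SrcMap, SrcMapAlias>::value,
                  "both declarations name the same type");

    // 2) Tensor dimensions are signed (Eigen::Index). Holding intermediate
    //    block sizes as size_t forced a cast back to the dimension type in
    //    every comparison; keeping everything as Index lets numext::mini
    //    deduce a single type with no casts or sign-mismatch warnings.
    int main() {
      const Eigen::Index dim_size_target = 8;  // was size_t before the patch
      const Eigen::Index tensor_dim = 5;
      const Eigen::Index block_dim =
          Eigen::numext::mini(dim_size_target, tensor_dim);  // no cast needed
      return block_dim == 5 ? 0 : 1;
    }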