From e6d5be811ddab928ae7ed73f76e1c4c8e18917e2 Mon Sep 17 00:00:00 2001
From: Benoit Steiner
Date: Mon, 13 Aug 2018 10:29:21 -0700
Subject: Fixed syntax of nested templates chevrons to make it compatible with c++97 mode.
---
 unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h')

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
index cbf91013b..3904552a9 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
@@ -155,8 +155,8 @@ struct TensorBlockCopyOp {
     typedef const Eigen::Array Src;
     typedef Eigen::Array Dst;
 
-    typedef Eigen::Map> SrcMap;
-    typedef Eigen::Map> DstMap;
+    typedef Eigen::Map > SrcMap;
+    typedef Eigen::Map > DstMap;
 
     const SrcMap src(src_base, num_coeff_to_copy, InnerStride<>(src_stride));
     DstMap dst(dst_base, num_coeff_to_copy, InnerStride<>(dst_stride));
@@ -405,9 +405,9 @@ struct TensorBlockCwiseBinaryOp {
     typedef const Eigen::Array Rhs;
     typedef Eigen::Array Out;
 
-    typedef Eigen::Map> LhsMap;
-    typedef Eigen::Map> RhsMap;
-    typedef Eigen::Map> OutMap;
+    typedef Eigen::Map > LhsMap;
+    typedef Eigen::Map > RhsMap;
+    typedef Eigen::Map > OutMap;
 
     const LeftScalar* lhs_base = &left_data[left_index];
     const RightScalar* rhs_base = &right_data[right_index];
--
cgit v1.2.3
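A note on the change above: before C++11 the lexer applies maximal munch to ">>", so two adjacent closing chevrons in a nested template-id are tokenized as the right-shift operator unless a space separates them; the "c++97" in the subject line can only mean the pre-C++11 dialects (C++98/C++03). A minimal standalone illustration of the rule (not code from the patch):

    #include <vector>

    // C++98/03 lexes ">>" as a single shift token, so the first typedef
    // fails to parse in those modes; the spaced form is valid everywhere.
    // typedef std::vector<std::vector<int>> Matrix;  // error before C++11
    typedef std::vector<std::vector<int> > Matrix;    // OK in every standard

    int main() {
      Matrix m(3, std::vector<int>(3, 0));  // 3x3 zero-filled grid
      return m[0][0];
    }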
From 3810ec228fbc9ff8fff23a997c09a490f319c902 Mon Sep 17 00:00:00 2001
From: Benoit Steiner
Date: Mon, 13 Aug 2018 10:46:09 -0700
Subject: Don't use the auto keyword since it's not always supported properly.
---
 unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h')

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
index 3904552a9..21a6b66e8 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
@@ -501,7 +501,7 @@ struct TensorBlockCwiseBinaryIO {
       if (size == 1) {
         continue;
       }
-      auto& state = block_iter_state[num_squeezed_dims];
+      BlockIteratorState& state = block_iter_state[num_squeezed_dims];
       state.output_stride = block_strides[dim];
       state.left_stride = left_strides[dim];
       state.right_stride = right_strides[dim];
@@ -523,7 +523,7 @@ struct TensorBlockCwiseBinaryIO {
                                       right_stride, right_data);
       // Update index.
       for (int j = 0; j < num_squeezed_dims; ++j) {
-        auto& state = block_iter_state[j];
+        BlockIteratorState& state = block_iter_state[j];
         if (++state.count < state.size) {
           output_index += state.output_stride;
           left_index += state.left_stride;
--
cgit v1.2.3
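A note on the change above: `auto` type deduction is a C++11 feature, so pre-C++11 toolchains reject it and the patch spells the type out. A hedged sketch of the same pattern (types and names invented for illustration, not the Eigen internals):

    struct BlockIteratorState {
      int count, size, stride;
    };

    void advance(BlockIteratorState* states, int num_dims) {
      for (int j = 0; j < num_dims; ++j) {
        // C++11 only: auto& state = states[j];
        BlockIteratorState& state = states[j];  // portable to C++03
        if (++state.count < state.size) return;
        state.count = 0;  // wrapped around; carry into the next dimension
      }
    }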
From 59bba77ead210f71b61ee6c551207c6f062bc123 Mon Sep 17 00:00:00 2001
From: Benoit Steiner
Date: Tue, 14 Aug 2018 10:54:48 -0700
Subject: Fixed compilation errors with gcc 4.7 and 4.8
---
 unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h   | 26 +++++++++++-----------
 .../Eigen/CXX11/src/Tensor/TensorContraction.h     |  2 +-
 2 files changed, 14 insertions(+), 14 deletions(-)

(limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h')

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
index 21a6b66e8..322260011 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
@@ -73,7 +73,7 @@ struct TensorOpResourceRequirements {
   // expression tree (like reductions) to communicate resources
   // requirements based on local state (like the total number of reductions
   // to be computed).
-  TensorOpResourceRequirements(internal::TensorBlockShapeType shape,
+  TensorOpResourceRequirements(TensorBlockShapeType shape,
                                const Index size)
       : block_shape(shape), block_total_size(size) {}
 };
@@ -90,9 +90,9 @@ EIGEN_STRONG_INLINE void MergeResourceRequirements(
   *block_shape = resources[0].block_shape;
   *block_total_size = resources[0].block_total_size;
   for (std::vector::size_type i = 1; i < resources.size(); ++i) {
-    if (resources[i].block_shape == TensorBlockShapeType::kSkewedInnerDims &&
-        *block_shape != TensorBlockShapeType::kSkewedInnerDims) {
-      *block_shape = TensorBlockShapeType::kSkewedInnerDims;
+    if (resources[i].block_shape == kSkewedInnerDims &&
+        *block_shape ! kSkewedInnerDims) {
+      *block_shape = kSkewedInnerDims;
     }
     *block_total_size =
         numext::maxi(*block_total_size, resources[i].block_total_size);
@@ -178,9 +178,9 @@ template
 class TensorBlockIO {
  public:
-  typedef typename internal::TensorBlock
+  typedef typename TensorBlock
       TensorBlock;
-  typedef typename internal::TensorBlockCopyOp
+  typedef typename TensorBlockCopyOp
       TensorBlockCopyOp;
 
 protected:
@@ -320,7 +320,7 @@ template
 class TensorBlockReader : public TensorBlockIO {
  public:
-  typedef typename internal::TensorBlock
+  typedef typename TensorBlock
       TensorBlock;
   typedef TensorBlockIO Base;
@@ -357,7 +357,7 @@ template
 class TensorBlockWriter : public TensorBlockIO {
  public:
-  typedef typename internal::TensorBlock
+  typedef typename TensorBlock
       TensorBlock;
   typedef TensorBlockIO Base;
@@ -434,7 +434,7 @@ struct TensorBlockCwiseBinaryOp {
 template
 struct TensorBlockCwiseBinaryIO {
-  typedef typename internal::TensorBlock::Dimensions Dimensions;
+  typedef typename TensorBlock::Dimensions Dimensions;
 
   struct BlockIteratorState {
@@ -627,7 +627,7 @@ struct TensorBlockView {
 template
 class TensorBlockMapper {
  public:
-  typedef typename internal::TensorBlock
+  typedef typename TensorBlock
       TensorBlock;
   typedef DSizes Dimensions;
@@ -742,7 +742,7 @@ class TensorBlockMapper {
         block_dim_sizes[i] = 1;
       }
     } else if (block_dim_sizes.TotalSize() > min_target_size) {
-      if (block_shape == TensorBlockShapeType::kUniformAllDims) {
+      if (block_shape == kUniformAllDims) {
         // Tensor will not fit within 'min_target_size' budget: calculate tensor
         // block dimension sizes based on "square" dimension size target.
         const size_t dim_size_target = static_cast(
@@ -773,7 +773,7 @@ class TensorBlockMapper {
           total_size = total_size_other_dims * block_dim_sizes[dim];
         }
       }
-    } else if (block_shape == TensorBlockShapeType::kSkewedInnerDims) {
+    } else if (block_shape == kSkewedInnerDims) {
       StorageIndex coeff_to_allocate = min_target_size;
       for (int i = 0; i < NumDims; ++i) {
         const int dim = cond()(i, NumDims - i - 1);
@@ -818,7 +818,7 @@ class TensorSliceBlockMapper {
 template
 class TensorSliceBlockMapper {
  public:
-  typedef typename internal::TensorBlock
+  typedef typename TensorBlock
       TensorBlock;
   typedef DSizes Dimensions;

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index e604456e8..a023718c6 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -155,7 +155,7 @@ struct TensorContractionParams {
 // See expected implementation in NoOpOutputKernel.
 struct OutputKernel {
   template
-  using OutputMapper = internal::blas_data_mapper;
+  typedef internal::blas_data_mapper OutputMapper;
 };
 
 // Output kernel that does absolutely nothing.
--
cgit v1.2.3
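A note on the change above: besides dropping the `internal::` and `TensorBlockShapeType::` qualifications, the patch replaces a member alias template (`using OutputMapper = ...`) with a typedef, presumably because the headers must also build in C++03 mode, where alias templates do not exist. When the alias itself must stay parameterized, the usual portable workaround is a wrapper struct; a sketch under that assumption (names invented, not the Eigen code):

    #include <vector>

    // C++11 alias template, rejected by C++03 compilers:
    //   template <typename T> using Vec = std::vector<T>;
    // C++03 replacement: a struct holding a nested typedef.
    template <typename T>
    struct Vec {
      typedef std::vector<T> type;
    };

    int main() {
      Vec<int>::type v(4, 42);  // spelled Vec<int>::type instead of Vec<int>
      return int(v.size());
    }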
From ab3f481141a6bc72d2bbdc6300fb9dc157029ea9 Mon Sep 17 00:00:00 2001
From: Benoit Steiner
Date: Tue, 14 Aug 2018 14:05:46 -0700
Subject: Cleaned up the code and make it compile with more compilers
---
 unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h | 76 ++++++++++--------------
 1 file changed, 32 insertions(+), 44 deletions(-)

(limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h')

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
index 322260011..24a6343e8 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
@@ -91,7 +91,7 @@ EIGEN_STRONG_INLINE void MergeResourceRequirements(
   *block_total_size = resources[0].block_total_size;
   for (std::vector::size_type i = 1; i < resources.size(); ++i) {
     if (resources[i].block_shape == kSkewedInnerDims &&
-        *block_shape ! kSkewedInnerDims) {
+        *block_shape != kSkewedInnerDims) {
       *block_shape = kSkewedInnerDims;
     }
     *block_total_size =
@@ -152,11 +152,11 @@ struct TensorBlockCopyOp {
     const Scalar* src_base = &src_data[src_index];
     Scalar* dst_base = &dst_data[dst_index];
 
-    typedef const Eigen::Array Src;
-    typedef Eigen::Array Dst;
+    typedef const Array Src;
+    typedef Array Dst;
 
-    typedef Eigen::Map > SrcMap;
-    typedef Eigen::Map > DstMap;
+    typedef Map > SrcMap;
+    typedef Map > DstMap;
 
     const SrcMap src(src_base, num_coeff_to_copy, InnerStride<>(src_stride));
     DstMap dst(dst_base, num_coeff_to_copy, InnerStride<>(dst_stride));
@@ -178,10 +178,8 @@ template
 class TensorBlockIO {
  public:
-  typedef typename TensorBlock
-      TensorBlock;
-  typedef typename TensorBlockCopyOp
-      TensorBlockCopyOp;
+  typedef TensorBlock Block;
+  typedef TensorBlockCopyOp BlockCopyOp;
 
 protected:
   struct BlockIteratorState {
@@ -194,7 +192,7 @@ class TensorBlockIO {
   };
 
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Copy(
-      const TensorBlock& block, StorageIndex first_coeff_index,
+      const Block& block, StorageIndex first_coeff_index,
       const array& tensor_to_block_dim_map,
       const array& tensor_strides, const Scalar* src_data,
       Scalar* dst_data) {
@@ -290,8 +288,8 @@ class TensorBlockIO {
     const StorageIndex block_total_size =
        NumDims == 0 ? 1 : block.block_sizes().TotalSize();
     for (StorageIndex i = 0; i < block_total_size; i += block_inner_dim_size) {
-      TensorBlockCopyOp::Run(block_inner_dim_size, outputIndex, output_stride,
-                             dst_data, inputIndex, input_stride, src_data);
+      BlockCopyOp::Run(block_inner_dim_size, outputIndex, output_stride,
+                       dst_data, inputIndex, input_stride, src_data);
       // Update index.
       for (int j = 0; j < num_squeezed_dims; ++j) {
         if (++block_iter_state[j].count < block_iter_state[j].size) {
@@ -320,13 +318,11 @@ template
 class TensorBlockReader : public TensorBlockIO {
  public:
-  typedef typename TensorBlock
-      TensorBlock;
-  typedef TensorBlockIO
-      Base;
+  typedef TensorBlock Block;
+  typedef TensorBlockIO Base;
 
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
-      TensorBlock* block, const Scalar* src_data) {
+      Block* block, const Scalar* src_data) {
     array tensor_to_block_dim_map;
     for (int i = 0; i < NumDims; ++i) {
       tensor_to_block_dim_map[i] = i;
@@ -336,7 +332,7 @@ class TensorBlockReader : public TensorBlockIO& tensor_to_block_dim_map,
       const array& tensor_strides, const Scalar* src_data) {
     Base::Copy(*block, first_coeff_index, tensor_to_block_dim_map,
@@ -357,13 +353,11 @@ template
 class TensorBlockWriter : public TensorBlockIO {
  public:
-  typedef typename TensorBlock
-      TensorBlock;
-  typedef TensorBlockIO
-      Base;
+  typedef TensorBlock Block;
+  typedef TensorBlockIO Base;
 
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
-      const TensorBlock& block, Scalar* dst_data) {
+      const Block& block, Scalar* dst_data) {
     array tensor_to_block_dim_map;
     for (int i = 0; i < NumDims; ++i) {
       tensor_to_block_dim_map[i] = i;
@@ -373,7 +367,7 @@ class TensorBlockWriter : public TensorBlockIO& tensor_to_block_dim_map,
       const array& tensor_strides, Scalar* dst_data) {
     Base::Copy(block, first_coeff_index, tensor_to_block_dim_map,
@@ -401,13 +395,13 @@ struct TensorBlockCwiseBinaryOp {
       const StorageIndex left_stride, const LeftScalar* left_data,
       const StorageIndex right_index, const StorageIndex right_stride,
       const RightScalar* right_data) {
-    typedef const Eigen::Array Lhs;
-    typedef const Eigen::Array Rhs;
-    typedef Eigen::Array Out;
+    typedef const Array Lhs;
+    typedef const Array Rhs;
+    typedef Array Out;
 
-    typedef Eigen::Map > LhsMap;
-    typedef Eigen::Map > RhsMap;
-    typedef Eigen::Map > OutMap;
+    typedef Map > LhsMap;
+    typedef Map > RhsMap;
+    typedef Map > OutMap;
 
     const LeftScalar* lhs_base = &left_data[left_index];
     const RightScalar* rhs_base = &right_data[right_index];
@@ -417,8 +411,7 @@ struct TensorBlockCwiseBinaryOp {
     const RhsMap rhs(rhs_base, num_coeff, InnerStride<>(right_stride));
     OutMap out(out_base, num_coeff, InnerStride<>(output_stride));
 
-    out =
-        Eigen::CwiseBinaryOp(lhs, rhs, functor);
+    out = CwiseBinaryOp(lhs, rhs, functor);
   }
 };
@@ -434,8 +427,7 @@ struct TensorBlockCwiseBinaryOp {
 template
 struct TensorBlockCwiseBinaryIO {
-  typedef typename TensorBlock::Dimensions Dimensions;
+  typedef typename TensorBlock::Dimensions Dimensions;
 
   struct BlockIteratorState {
     StorageIndex output_stride, output_span;
@@ -627,8 +619,7 @@ struct TensorBlockView {
 template
 class TensorBlockMapper {
  public:
-  typedef typename TensorBlock
-      TensorBlock;
+  typedef TensorBlock Block;
   typedef DSizes Dimensions;
 
   TensorBlockMapper(const Dimensions& dims,
@@ -663,7 +654,7 @@ class TensorBlockMapper {
     }
   }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block
   GetBlockForIndex(StorageIndex block_index, Scalar* data) const {
     StorageIndex first_coeff_index = 0;
     DSizes coords;
@@ -711,8 +702,7 @@ class TensorBlockMapper {
       }
     }
 
-    return TensorBlock(first_coeff_index, sizes, strides, m_tensor_strides,
-                       data);
+    return Block(first_coeff_index, sizes, strides, m_tensor_strides, data);
   }
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex total_block_count() const {
@@ -818,8 +808,7 @@ class TensorBlockMapper {
 template
 class TensorSliceBlockMapper {
  public:
-  typedef typename TensorBlock
-      TensorBlock;
+  typedef TensorBlock Block;
   typedef DSizes Dimensions;
 
   TensorSliceBlockMapper(const Dimensions& tensor_dims,
@@ -860,7 +849,7 @@ class TensorSliceBlockMapper {
     }
   }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Block
   GetBlockForIndex(StorageIndex block_index, Scalar* data) const {
     StorageIndex first_coeff_index = 0;
     DSizes coords;
@@ -917,8 +906,7 @@ class TensorSliceBlockMapper {
       }
     }
 
-    return TensorBlock(first_coeff_index, sizes, strides, m_tensor_strides,
-                       data);
+    return Block(first_coeff_index, sizes, strides, m_tensor_strides, data);
   }
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex total_block_count() const {
--
cgit v1.2.3
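A note on the cleanup above: the member typedefs previously reused the very names of the class templates they alias (`typedef ... TensorBlock TensorBlock;`), and the `Eigen::` qualifications are redundant inside namespace Eigen. Re-declaring a name that shadows a visible class template is legal but trips up several compilers, which is presumably why the aliases were renamed to `Block`/`BlockCopyOp`. A contrived sketch of the hazard (hypothetical types, not the Eigen code):

    template <typename T>
    class TensorBlock { /* ... */ };

    template <typename T>
    class Mapper {
     public:
      // Risky: the alias name shadows the template itself inside Mapper,
      // and some older compilers mis-resolve later uses of "TensorBlock":
      //   typedef TensorBlock<T> TensorBlock;
      typedef TensorBlock<T> Block;  // distinct name: unambiguous everywhere
      Block make() const { return Block(); }
    };

    int main() {
      Mapper<float> m;
      m.make();
      return 0;
    }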
From f7675b826b2f0a33b09b97342e855a8ef059927e Mon Sep 17 00:00:00 2001
From: Christoph Hertzberg
Date: Fri, 24 Aug 2018 22:58:55 +0200
Subject: Fix several integer conversion and sign-compare warnings
---
 unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h   | 27 ++++----
 .../Eigen/CXX11/src/Tensor/TensorDimensions.h      | 72 +++++++++++-----------
 .../Eigen/CXX11/src/Tensor/TensorExecutor.h        |  4 +-
 .../Eigen/CXX11/src/Tensor/TensorIndexList.h       |  2 +-
 unsupported/test/cxx11_tensor_block_access.cpp     | 54 ++++++++--------
 unsupported/test/cxx11_tensor_thread_pool.cpp      |  2 +-
 6 files changed, 80 insertions(+), 81 deletions(-)

(limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h')

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
index 24a6343e8..aa500eb70 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
@@ -212,11 +212,11 @@ class TensorBlockIO {
         num_size_one_inner_dims, NumDims - num_size_one_inner_dims - 1);
     const StorageIndex block_dim_for_tensor_stride1_dim =
         NumDims == 0 ? 1 : tensor_to_block_dim_map[tensor_stride1_dim];
-    size_t block_inner_dim_size =
+    StorageIndex block_inner_dim_size =
         NumDims == 0 ? 1
                      : block.block_sizes()[block_dim_for_tensor_stride1_dim];
-    for (int i = num_size_one_inner_dims + 1; i < NumDims; ++i) {
-      const int dim = cond()(i, NumDims - i - 1);
+    for (Index i = num_size_one_inner_dims + 1; i < NumDims; ++i) {
+      const Index dim = cond()(i, NumDims - i - 1);
       const StorageIndex block_stride =
           block.block_strides()[tensor_to_block_dim_map[dim]];
       if (block_inner_dim_size == block_stride &&
@@ -258,8 +258,8 @@ class TensorBlockIO {
     // Initialize block iterator state. Squeeze away any dimension of size 1.
     int num_squeezed_dims = 0;
-    for (int i = num_size_one_inner_dims; i < NumDims - 1; ++i) {
-      const int dim = cond()(i + 1, NumDims - i - 2);
+    for (Index i = num_size_one_inner_dims; i < NumDims - 1; ++i) {
+      const Index dim = cond()(i + 1, NumDims - i - 2);
       const StorageIndex size = block.block_sizes()[tensor_to_block_dim_map[dim]];
       if (size == 1) {
         continue;
@@ -626,7 +626,7 @@ class TensorBlockMapper {
                     const TensorBlockShapeType block_shape,
                     Index min_target_size)
       : m_dimensions(dims),
-        m_block_dim_sizes(BlockDimensions(dims, block_shape, min_target_size)) {
+        m_block_dim_sizes(BlockDimensions(dims, block_shape, internal::convert_index(min_target_size))) {
     // Calculate block counts by dimension and total block count.
     DSizes block_count;
     for (Index i = 0; i < block_count.rank(); ++i) {
@@ -717,8 +717,8 @@ class TensorBlockMapper {
  private:
   static Dimensions BlockDimensions(const Dimensions& tensor_dims,
                                     const TensorBlockShapeType block_shape,
-                                    Index min_target_size) {
-    min_target_size = numext::maxi(1, min_target_size);
+                                    StorageIndex min_target_size) {
+    min_target_size = numext::maxi(1, min_target_size);
 
     // If tensor fully fits into the target size, we'll treat it a single block.
     Dimensions block_dim_sizes = tensor_dims;
@@ -735,16 +735,15 @@ class TensorBlockMapper {
       if (block_shape == kUniformAllDims) {
         // Tensor will not fit within 'min_target_size' budget: calculate tensor
         // block dimension sizes based on "square" dimension size target.
-        const size_t dim_size_target = static_cast(
+        const StorageIndex dim_size_target = internal::convert_index(
             std::pow(static_cast(min_target_size),
                      1.0f / static_cast(block_dim_sizes.rank())));
-        for (size_t i = 0; i < block_dim_sizes.rank(); ++i) {
+        for (Index i = 0; i < block_dim_sizes.rank(); ++i) {
           // TODO(andydavis) Adjust the inner most 'block_dim_size' to make it
           // a multiple of the packet size. Note that reducing
           // 'block_dim_size' in this manner can increase the number of
           // blocks, and so will amplify any per-block overhead.
-          block_dim_sizes[i] = numext::mini(
-              dim_size_target, static_cast(tensor_dims[i]));
+          block_dim_sizes[i] = numext::mini(dim_size_target, tensor_dims[i]);
         }
         // Add any un-allocated coefficients to inner dimension(s).
         StorageIndex total_size = block_dim_sizes.TotalSize();
@@ -781,7 +780,7 @@ class TensorBlockMapper {
 
     eigen_assert(
         block_dim_sizes.TotalSize() >=
-        numext::mini(min_target_size, tensor_dims.TotalSize()));
+        numext::mini(min_target_size, tensor_dims.TotalSize()));
 
     return block_dim_sizes;
   }
@@ -824,7 +823,7 @@ class TensorSliceBlockMapper {
         m_total_block_count(1) {
     // Calculate block counts by dimension and total block count.
     DSizes block_count;
-    for (size_t i = 0; i < block_count.rank(); ++i) {
+    for (Index i = 0; i < block_count.rank(); ++i) {
       block_count[i] = divup(m_tensor_slice_extents[i], m_block_dim_sizes[i]);
     }
     m_total_block_count = array_prod(block_count);

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
index 4f973a5b7..ce91bc2a6 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
@@ -32,12 +32,12 @@ namespace Eigen {
 // Boilerplate code
 namespace internal {
 
-template struct dget {
+template struct dget {
   static const std::ptrdiff_t value = get::value;
 };
 
-template
+template
 struct fixed_size_tensor_index_linearization_helper
 {
   template EIGEN_DEVICE_FUNC
@@ -50,7 +50,7 @@ struct fixed_size_tensor_index_linearization_helper
   }
 };
 
-template
+template
 struct fixed_size_tensor_index_linearization_helper
 {
   template EIGEN_DEVICE_FUNC
@@ -60,7 +60,7 @@ struct fixed_size_tensor_index_linearization_helper
 
-template
+template
 struct fixed_size_tensor_index_extraction_helper
 {
   template EIGEN_DEVICE_FUNC
@@ -94,7 +94,7 @@ struct Sizes {
   typedef internal::numeric_list Base;
   const Base t = Base();
   static const std::ptrdiff_t total_size = internal::arg_prod(Indices...);
-  static const size_t count = Base::count;
+  static const ptrdiff_t count = Base::count;
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t rank() const {
     return Base::count;
@@ -121,16 +121,16 @@ struct Sizes {
     return *this;
   }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t operator[] (const std::size_t index) const {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t operator[] (const std::ptrdiff_t index) const {
     return internal::fixed_size_tensor_index_extraction_helper::run(index, t);
   }
 
   template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-  size_t IndexOfColMajor(const array& indices) const {
+  ptrdiff_t IndexOfColMajor(const array& indices) const {
     return internal::fixed_size_tensor_index_linearization_helper::run(indices, t);
   }
   template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-  size_t IndexOfRowMajor(const array& indices) const {
+  ptrdiff_t IndexOfRowMajor(const array& indices) const {
     return internal::fixed_size_tensor_index_linearization_helper::run(indices, t);
   }
 };
@@ -144,25 +144,25 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes
-template
+template
 struct non_zero_size {
-  typedef internal::type2val type;
+  typedef internal::type2val type;
 };
 template <> struct non_zero_size<0> {
   typedef internal::null_type type;
 };
 
-template struct Sizes {
+template struct Sizes {
   typedef typename internal::make_type_list::type, typename non_zero_size::type, typename non_zero_size::type, typename non_zero_size::type, typename non_zero_size::type >::type Base;
-  static const size_t count = Base::count;
-  static const std::size_t total_size = internal::arg_prod::value;
+  static const std::ptrdiff_t count = Base::count;
+  static const std::ptrdiff_t total_size = internal::arg_prod::value;
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptrdiff_t rank() const {
     return count;
   }
 
-  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t TotalSize() {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptrdiff_t TotalSize() {
     return internal::arg_prod::value;
   }
 
@@ -178,7 +178,7 @@ template Sizes(DenseIndex...
 /*indices*/) { }
-  explicit Sizes(std::initializer_list) {
+  explicit Sizes(std::initializer_list) {
     // todo: add assertion
   }
 #else
@@ -213,18 +213,18 @@ template
 
   template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-  size_t IndexOfColMajor(const array& indices) const {
+  ptrdiff_t IndexOfColMajor(const array& indices) const {
     return internal::fixed_size_tensor_index_linearization_helper::run(indices, *reinterpret_cast(this));
   }
   template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-  size_t IndexOfRowMajor(const array& indices) const {
+  ptrdiff_t IndexOfRowMajor(const array& indices) const {
     return internal::fixed_size_tensor_index_linearization_helper::run(indices, *reinterpret_cast(this));
   }
 };
 
 namespace internal {
-template
-EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_prod(const Sizes&) {
+template
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes&) {
   return Sizes::total_size;
 }
 }
@@ -233,7 +233,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_prod(const Sizes
-template
+template
 struct tensor_index_linearization_helper
 {
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -245,7 +245,7 @@ struct tensor_index_linearization_helper
   }
 };
 
-template
+template
 struct tensor_index_linearization_helper
 {
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -264,7 +264,7 @@ struct DSizes : array {
   typedef array Base;
   static const int count = NumDims;
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const {
     return NumDims;
   }
 
@@ -298,7 +298,7 @@ struct DSizes : array {
     }
   }
 #else
-  template
+  template
   EIGEN_DEVICE_FUNC DSizes(const Sizes& a) {
     for (int i = 0 ; i < NumDims; ++i) {
       (*this)[i] = a[i];
@@ -359,7 +359,7 @@ struct DSizes : array {
 
 // Boilerplate
 namespace internal {
-template
+template
 struct tensor_vsize_index_linearization_helper
 {
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -371,7 +371,7 @@ struct tensor_vsize_index_linearization_helper
   }
 };
 
-template
+template
 struct tensor_vsize_index_linearization_helper
 {
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -386,10 +386,10 @@ struct tensor_vsize_index_linearization_helper
 namespace internal {
 
 template struct array_size > {
-  static const size_t value = NumDims;
+  static const ptrdiff_t value = NumDims;
 };
 template struct array_size > {
-  static const size_t value = NumDims;
+  static const ptrdiff_t value = NumDims;
 };
 #ifndef EIGEN_EMULATE_CXX11_META_H
 template struct array_size > {
@@ -399,33 +399,33 @@ template struct array_size::count;
 };
 template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes&) {
-  return get >::value;
+  return get >::value;
 }
 template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<>&) {
   eigen_assert(false && "should never be called");
   return -1;
 }
 #else
-template struct array_size > {
-  static const size_t value = Sizes::count;
+template struct array_size > {
+  static const ptrdiff_t value = Sizes::count;
 };
-template struct array_size > {
-  static const size_t value = Sizes::count;
+template struct array_size > {
+  static const ptrdiff_t value = Sizes::count;
 };
-template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_get(const Sizes&) {
+template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes&) {
   return get::Base>::value;
 }
 #endif
 
-template
+template
 struct sizes_match_below_dim {
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1&, Dims2&) {
     return false;
   }
 };
-template
+template
 struct sizes_match_below_dim {
   static
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1& dims1, Dims2& dims2) {
     return (array_get(dims1) == array_get(dims2)) &

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index 9b9587de5..b756be3b3 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -256,8 +256,8 @@ class TensorExecutor
         TensorBlockMapper;
 
     Evaluator evaluator(expr, device);
-    StorageIndex total_size = array_prod(evaluator.dimensions());
-    StorageIndex cache_size = device.firstLevelCacheSize() / sizeof(Scalar);
+    Index total_size = array_prod(evaluator.dimensions());
+    Index cache_size = device.firstLevelCacheSize() / sizeof(Scalar);
     if (total_size < cache_size) {
       // TODO(andydavis) Reduce block management overhead for small tensors.
       internal::TensorExecutor

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h b/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
 struct NumTraits >
 namespace internal {
 template  EIGEN_DEVICE_FUNC
 void update_value(T& val, DenseIndex new_val) {
-  val = new_val;
+  val = internal::convert_index(new_val);
 }
 template  EIGEN_DEVICE_FUNC
 void update_value(type2index& val, DenseIndex new_val) {

diff --git a/unsupported/test/cxx11_tensor_block_access.cpp b/unsupported/test/cxx11_tensor_block_access.cpp
index f572e496d..24a95ab12 100644
--- a/unsupported/test/cxx11_tensor_block_access.cpp
+++ b/unsupported/test/cxx11_tensor_block_access.cpp
@@ -33,8 +33,8 @@ static internal::TensorBlockShapeType RandomShape() {
 }
 
 template
-static std::size_t RandomTargetSize(const DSizes& dims) {
-  return internal::random(1, dims.TotalSize());
+static Index RandomTargetSize(const DSizes& dims) {
+  return internal::random(1, dims.TotalSize());
 }
 
 template
@@ -178,7 +178,7 @@ static void test_block_mapper_maps_every_element() {
   // Verify that every coefficient in the original Tensor is accessible through
   // TensorBlock only once.
   Index total_coeffs = dims.TotalSize();
-  VERIFY_IS_EQUAL(coeff_set.size(), total_coeffs);
+  VERIFY_IS_EQUAL(Index(coeff_set.size()), total_coeffs);
   VERIFY_IS_EQUAL(*coeff_set.begin(), 0);
   VERIFY_IS_EQUAL(*coeff_set.rbegin(), total_coeffs - 1);
 }
@@ -208,7 +208,7 @@ static void test_slice_block_mapper_maps_every_element() {
   // Pick a random dimension sizes for the tensor blocks.
   DSizes block_sizes;
   for (int i = 0; i < NumDims; ++i) {
-    block_sizes[i] = internal::random(1, tensor_slice_extents[i]);
+    block_sizes[i] = internal::random(1, tensor_slice_extents[i]);
   }
 
   TensorSliceBlockMapper block_mapper(tensor_dims, tensor_slice_offsets,
@@ -222,7 +222,7 @@ static void test_slice_block_mapper_maps_every_element() {
                                            &coeff_set);
   }
 
-  VERIFY_IS_EQUAL(coeff_set.size(), total_coeffs);
+  VERIFY_IS_EQUAL(Index(coeff_set.size()), total_coeffs);
 }
 
 template
@@ -262,14 +262,14 @@ static void test_block_io_copy_data_from_source_to_target() {
 }
 
 template
-static int GetInputIndex(Index output_index,
+static Index GetInputIndex(Index output_index,
                          const array& output_to_input_dim_map,
                          const array& input_strides,
                          const array& output_strides) {
   int input_index = 0;
   if (Layout == ColMajor) {
     for (int i = NumDims - 1; i > 0; --i) {
-      const int idx = output_index / output_strides[i];
+      const Index idx = output_index / output_strides[i];
       input_index += idx * input_strides[output_to_input_dim_map[i]];
       output_index -= idx * output_strides[i];
     }
@@ -277,7 +277,7 @@ static int GetInputIndex(Index output_index,
            output_index * input_strides[output_to_input_dim_map[0]];
   } else {
     for (int i = 0; i < NumDims - 1; ++i) {
-      const int idx = output_index / output_strides[i];
+      const Index idx = output_index / output_strides[i];
       input_index += idx * input_strides[output_to_input_dim_map[i]];
       output_index -= idx * output_strides[i];
     }
@@ -650,7 +650,7 @@ static void test_uniform_block_shape()
 {
   // Test shape 'UniformAllDims' with uniform 'max_coeff count'.
   DSizes dims(11, 5, 6, 17, 7);
-  const size_t max_coeff_count = 5 * 5 * 5 * 5 * 5;
+  const Index max_coeff_count = 5 * 5 * 5 * 5 * 5;
   TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                  max_coeff_count);
   TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -664,7 +664,7 @@ static void test_uniform_block_shape()
   // partially into first inner-most dimension.
   if (Layout == ColMajor) {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 7 * 5 * 5 * 5 * 5;
+    const Index max_coeff_count = 7 * 5 * 5 * 5 * 5;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -675,7 +675,7 @@ static void test_uniform_block_shape()
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 5 * 5 * 5 * 5 * 6;
+    const Index max_coeff_count = 5 * 5 * 5 * 5 * 6;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -690,7 +690,7 @@ static void test_uniform_block_shape()
   // fully into first inner-most dimension.
   if (Layout == ColMajor) {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 11 * 5 * 5 * 5 * 5;
+    const Index max_coeff_count = 11 * 5 * 5 * 5 * 5;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -701,7 +701,7 @@ static void test_uniform_block_shape()
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 5 * 5 * 5 * 5 * 7;
+    const Index max_coeff_count = 5 * 5 * 5 * 5 * 7;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -716,7 +716,7 @@ static void test_uniform_block_shape()
   // fully into first few inner-most dimensions.
   if (Layout == ColMajor) {
     DSizes dims(7, 5, 6, 17, 7);
-    const size_t max_coeff_count = 7 * 5 * 6 * 7 * 5;
+    const Index max_coeff_count = 7 * 5 * 6 * 7 * 5;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -728,7 +728,7 @@ static void test_uniform_block_shape()
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes dims(7, 5, 6, 9, 7);
-    const size_t max_coeff_count = 5 * 5 * 5 * 6 * 7;
+    const Index max_coeff_count = 5 * 5 * 5 * 6 * 7;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -743,7 +743,7 @@ static void test_uniform_block_shape()
   // Test shape 'UniformAllDims' with full allocation to all dims.
   if (Layout == ColMajor) {
     DSizes dims(7, 5, 6, 17, 7);
-    const size_t max_coeff_count = 7 * 5 * 6 * 17 * 7;
+    const Index max_coeff_count = 7 * 5 * 6 * 17 * 7;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -755,7 +755,7 @@ static void test_uniform_block_shape()
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes dims(7, 5, 6, 9, 7);
-    const size_t max_coeff_count = 7 * 5 * 6 * 9 * 7;
+    const Index max_coeff_count = 7 * 5 * 6 * 9 * 7;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -777,7 +777,7 @@ static void test_skewed_inner_dim_block_shape()
   // Test shape 'SkewedInnerDims' with partial allocation to inner-most dim.
   if (Layout == ColMajor) {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 10 * 1 * 1 * 1 * 1;
+    const Index max_coeff_count = 10 * 1 * 1 * 1 * 1;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -788,7 +788,7 @@ static void test_skewed_inner_dim_block_shape()
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 1 * 1 * 1 * 1 * 6;
+    const Index max_coeff_count = 1 * 1 * 1 * 1 * 6;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -802,7 +802,7 @@ static void test_skewed_inner_dim_block_shape()
   // Test shape 'SkewedInnerDims' with full allocation to inner-most dim.
   if (Layout == ColMajor) {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 11 * 1 * 1 * 1 * 1;
+    const Index max_coeff_count = 11 * 1 * 1 * 1 * 1;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -813,7 +813,7 @@ static void test_skewed_inner_dim_block_shape()
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 1 * 1 * 1 * 1 * 7;
+    const Index max_coeff_count = 1 * 1 * 1 * 1 * 7;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -828,7 +828,7 @@ static void test_skewed_inner_dim_block_shape()
   // and partial allocation to second inner-dim.
   if (Layout == ColMajor) {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 11 * 3 * 1 * 1 * 1;
+    const Index max_coeff_count = 11 * 3 * 1 * 1 * 1;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -840,7 +840,7 @@ static void test_skewed_inner_dim_block_shape()
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 1 * 1 * 1 * 15 * 7;
+    const Index max_coeff_count = 1 * 1 * 1 * 15 * 7;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -856,7 +856,7 @@ static void test_skewed_inner_dim_block_shape()
   // and partial allocation to third inner-dim.
   if (Layout == ColMajor) {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 11 * 5 * 5 * 1 * 1;
+    const Index max_coeff_count = 11 * 5 * 5 * 1 * 1;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -869,7 +869,7 @@ static void test_skewed_inner_dim_block_shape()
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 1 * 1 * 5 * 17 * 7;
+    const Index max_coeff_count = 1 * 1 * 5 * 17 * 7;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -885,7 +885,7 @@ static void test_skewed_inner_dim_block_shape()
   // Test shape 'SkewedInnerDims' with full allocation to all dims.
   if (Layout == ColMajor) {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 11 * 5 * 6 * 17 * 7;
+    const Index max_coeff_count = 11 * 5 * 6 * 17 * 7;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -897,7 +897,7 @@ static void test_skewed_inner_dim_block_shape()
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 11 * 5 * 6 * 17 * 7;
+    const Index max_coeff_count = 11 * 5 * 6 * 17 * 7;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);

diff --git a/unsupported/test/cxx11_tensor_thread_pool.cpp b/unsupported/test/cxx11_tensor_thread_pool.cpp
index 7606b0abf..6d8e58214 100644
--- a/unsupported/test/cxx11_tensor_thread_pool.cpp
+++ b/unsupported/test/cxx11_tensor_thread_pool.cpp
@@ -300,7 +300,7 @@ static void test_multithread_contraction_with_output_kernel() {
 
   m_result = m_left * m_right;
 
-  for (size_t i = 0; i < t_result.dimensions().TotalSize(); i++) {
+  for (Index i = 0; i < t_result.dimensions().TotalSize(); i++) {
     VERIFY(&t_result.data()[i] != &m_result.data()[i]);
     VERIFY_IS_APPROX(t_result.data()[i], std::sqrt(m_result.data()[i]));
   }
--
cgit v1.2.3
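A note on the warning fixes above: most of the churn swaps `size_t`/`int` for the signed `Index`/`StorageIndex` types so that loop counters, dimension ranks, and `TotalSize()` results are compared and converted without implicit sign or width changes. A small illustration of the pattern (stand-in types, not Eigen's):

    #include <cstddef>
    #include <vector>

    typedef std::ptrdiff_t Index;  // stand-in for Eigen's signed index type

    Index count_positive(const std::vector<int>& v) {
      // "for (int i = 0; i < v.size(); ++i)" draws -Wsign-compare because
      // v.size() is unsigned; converting the size once keeps the loop clean.
      const Index size = static_cast<Index>(v.size());
      Index n = 0;
      for (Index i = 0; i < size; ++i) {
        if (v[static_cast<std::size_t>(i)] > 0) ++n;
      }
      return n;
    }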
From b1653d15996b844852e2cefdd4d63e55dbc771f5 Mon Sep 17 00:00:00 2001
From: Christoph Hertzberg
Date: Sat, 25 Aug 2018 12:21:00 +0200
Subject: Fix some trivial C++11 vs C++03 compatibility warnings
---
 unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h        | 2 +-
 unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h | 4 ++--
 unsupported/Eigen/CXX11/src/util/EmulateArray.h         | 8 ++++----
 3 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h')

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
index aa500eb70..4cb49e9b8 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
@@ -62,7 +62,7 @@ struct cond {
  */
 enum TensorBlockShapeType {
   kUniformAllDims,
-  kSkewedInnerDims,
+  kSkewedInnerDims
 };
 
 struct TensorOpResourceRequirements {

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
index b4a77b022..560e3ec22 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
@@ -105,7 +105,7 @@ struct TensorEvaluator, Device>
   typedef typename XprType::CoeffReturnType CoeffReturnType;
   typedef typename PacketType::type PacketReturnType;
   static const int PacketSize = PacketType::size;
-  bool isCopy= false, nByOne = false, oneByN = false;
+  bool isCopy, nByOne, oneByN;
 
   enum {
     IsAligned = true,
@@ -116,7 +116,7 @@ struct TensorEvaluator, Device>
   };
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
-      : m_broadcast(op.broadcast()),m_impl(op.expression(), device)
+      : isCopy(false), nByOne(false), oneByN(false), m_broadcast(op.broadcast()),m_impl(op.expression(), device)
   {
     // The broadcasting op doesn't change the rank of the tensor. One can't broadcast a scalar
     // and store the result in a scalar. Instead one should reshape the scalar into a a N-D

diff --git a/unsupported/Eigen/CXX11/src/util/EmulateArray.h b/unsupported/Eigen/CXX11/src/util/EmulateArray.h
index 32db51592..d5c000e08 100644
--- a/unsupported/Eigen/CXX11/src/util/EmulateArray.h
+++ b/unsupported/Eigen/CXX11/src/util/EmulateArray.h
@@ -207,16 +207,16 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& array_get(const array& a) {
 }
 
 template struct array_size > {
-  static const size_t value = N;
+  enum { value = N };
 };
 template struct array_size& > {
-  static const size_t value = N;
+  enum { value = N };
 };
 template struct array_size > {
-  static const size_t value = N;
+  enum { value = N };
 };
 template struct array_size& > {
-  static const size_t value = N;
+  enum { value = N };
 };
 
 }  // end namespace internal
--
cgit v1.2.3
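A note on the compatibility fixes above: each hunk replaces a construct that is C++11-only, or that C++03 compilers warn about pedantically, with a spelling accepted by both dialects. A compact recap of the three idioms, as a sketch rather than the Eigen sources:

    // 1. No trailing comma after the last enumerator (C++03 pedantic error).
    enum TensorBlockShapeType {
      kUniformAllDims,
      kSkewedInnerDims  // no comma here
    };

    // 2. No in-class member initializers in C++03: initialize in the
    //    constructor's initializer list instead of "bool isCopy = false;".
    struct Evaluator {
      bool isCopy, nByOne, oneByN;
      Evaluator() : isCopy(false), nByOne(false), oneByN(false) {}
    };

    // 3. "enum { value = N }" yields an integral constant without the
    //    out-of-class definition a "static const size_t value" may require.
    template <typename T, int N>
    struct array_size {
      enum { value = N };
    };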