From 26239ee580e5ffbdcad657c291bf4f49e6b297cf Mon Sep 17 00:00:00 2001
From: Benoit Steiner
Date: Mon, 13 Aug 2018 11:05:51 -0700
Subject: Use NULL instead of nullptr to avoid adding a cxx11 requirement.

---
 unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h')

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index 3f3b5685d..0294aa62e 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -227,7 +227,7 @@ class TensorExecutor {
     typedef EvalRange<Evaluator, StorageIndex, Vectorizable> EvalRange;

     Evaluator evaluator(expr, device);
-    const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr);
+    const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
     if (needs_assign) {
       const StorageIndex PacketSize =
           Vectorizable
@@ -271,7 +271,7 @@ class TensorExecutor {
-    const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr);
+    const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
--
cgit v1.2.3


From: Benoit Steiner
Date: Mon, 13 Aug 2018 15:16:40 -0700
Subject: Code cleanup

---
 unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h')

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index 0294aa62e..0cefe42dd 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -257,7 +257,6 @@ class TensorExecutor {
       typedef TensorBlock<ScalarNoConst, StorageIndex, NumDims, Evaluator::Layout>
          TensorBlock;
       typedef TensorBlockMapper<ScalarNoConst, StorageIndex, NumDims,
                                 Evaluator::Layout> TensorBlockMapper;

       Evaluator evaluator(expr, device);
--
cgit v1.2.3


From fbb834144df6190a93757098d097f230b167edc5 Mon Sep 17 00:00:00 2001
From: Benoit Steiner
Date: Wed, 15 Aug 2018 08:52:58 -0700
Subject: Fixed more compilation errors

---
 unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h')

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index 0cefe42dd..676645b0c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -132,7 +132,7 @@ class TensorExecutor {
       ... resources;
       evaluator.getResourceRequirements(&resources);
@@ -272,7 +272,7 @@ class TensorExecutor {
       ... resources;
--
cgit v1.2.3
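The three patches above all serve one goal: keeping the Tensor executor buildable without C++11. The contract they keep touching is evalSubExprsIfNeeded, which takes a destination pointer that may be null. Below is a minimal sketch of that contract in portable C++03; SimpleEvaluator and its members are illustrative stand-ins, not Eigen's actual TensorEvaluator API.

  #include <cstddef>  // for NULL

  template <typename Scalar>
  struct SimpleEvaluator {
    // Returns true if the caller still needs to run the assignment loop.
    // `dest` may be NULL, meaning "evaluate into my own buffer if needed".
    bool evalSubExprsIfNeeded(Scalar* dest) {
      return dest == NULL;  // NULL works in C++03; nullptr requires C++11
    }
    void cleanup() {}
  };

  int main() {
    SimpleEvaluator<float> eval;
    if (eval.evalSubExprsIfNeeded(NULL)) {
      // ... the coefficient-by-coefficient evaluation loop would run here ...
    }
    eval.cleanup();
    return 0;
  }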
From f197c3f55b3a04ab24dfee8057b1d510c7483fc3 Mon Sep 17 00:00:00 2001
From: Sameer Agarwal
Date: Wed, 15 Aug 2018 11:24:57 -0700
Subject: Removed an unused variable (PacketSize) from TensorExecutor

---
 unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h')

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index 676645b0c..9b9587de5 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -229,10 +229,6 @@ class TensorExecutor {
     Evaluator evaluator(expr, device);
     const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
     if (needs_assign) {
-      const StorageIndex PacketSize =
-          Vectorizable
-              ? unpacket_traits<typename Evaluator::PacketReturnType>::size
-              : 1;
       const StorageIndex size = array_prod(evaluator.dimensions());
       device.parallelFor(size, evaluator.costPerCoeff(Vectorizable),
                          EvalRange::alignBlockSize,
--
cgit v1.2.3
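The PacketSize removal above works because the thread-pool dispatch never consumes the packet width directly: parallelFor sizes its tasks from the per-coefficient cost returned by costPerCoeff(Vectorizable), where the vectorization speedup is already priced in. The sketch below illustrates that division of labor; OpCost, BlockSizeFor, and the constants are hypothetical, not Eigen's scheduler.

  #include <cstdio>

  struct OpCost {
    double cycles_per_coeff;  // already accounts for vectorization speedup
  };

  // Pick a block size so each task carries a fixed amount of work relative
  // to its estimated cost; the packet width never appears here.
  long BlockSizeFor(long total, OpCost cost) {
    const double kTargetCyclesPerTask = 100000.0;
    long block = static_cast<long>(kTargetCyclesPerTask / cost.cycles_per_coeff);
    if (block < 1) block = 1;
    if (block > total) block = total;
    return block;
  }

  int main() {
    OpCost vectorized = {0.25};  // e.g. 4 coefficients per cycle with 4-wide packets
    OpCost scalar = {1.0};
    std::printf("vectorized block: %ld\n", BlockSizeFor(1L << 20, vectorized));
    std::printf("scalar block:     %ld\n", BlockSizeFor(1L << 20, scalar));
    return 0;
  }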
From f7675b826b2f0a33b09b97342e855a8ef059927e Mon Sep 17 00:00:00 2001
From: Christoph Hertzberg
Date: Fri, 24 Aug 2018 22:58:55 +0200
Subject: Fix several integer conversion and sign-compare warnings

---
 unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h      | 27 ++++----
 .../Eigen/CXX11/src/Tensor/TensorDimensions.h         | 72 +++++++++----------
 .../Eigen/CXX11/src/Tensor/TensorExecutor.h           |  4 +-
 .../Eigen/CXX11/src/Tensor/TensorIndexList.h          |  2 +-
 unsupported/test/cxx11_tensor_block_access.cpp        | 54 ++++++++--------
 unsupported/test/cxx11_tensor_thread_pool.cpp         |  2 +-
 6 files changed, 80 insertions(+), 81 deletions(-)

(limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h')

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
index 24a6343e8..aa500eb70 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h
@@ -212,11 +212,11 @@ class TensorBlockIO {
         num_size_one_inner_dims, NumDims - num_size_one_inner_dims - 1);
     const StorageIndex block_dim_for_tensor_stride1_dim =
         NumDims == 0 ? 1 : tensor_to_block_dim_map[tensor_stride1_dim];
-    size_t block_inner_dim_size =
+    StorageIndex block_inner_dim_size =
         NumDims == 0 ? 1
                      : block.block_sizes()[block_dim_for_tensor_stride1_dim];
-    for (int i = num_size_one_inner_dims + 1; i < NumDims; ++i) {
-      const int dim = cond<Layout>()(i, NumDims - i - 1);
+    for (Index i = num_size_one_inner_dims + 1; i < NumDims; ++i) {
+      const Index dim = cond<Layout>()(i, NumDims - i - 1);
       const StorageIndex block_stride =
           block.block_strides()[tensor_to_block_dim_map[dim]];
       if (block_inner_dim_size == block_stride &&
@@ -258,8 +258,8 @@ class TensorBlockIO {
     // Initialize block iterator state. Squeeze away any dimension of size 1.
     int num_squeezed_dims = 0;
-    for (int i = num_size_one_inner_dims; i < NumDims - 1; ++i) {
-      const int dim = cond<Layout>()(i + 1, NumDims - i - 2);
+    for (Index i = num_size_one_inner_dims; i < NumDims - 1; ++i) {
+      const Index dim = cond<Layout>()(i + 1, NumDims - i - 2);
       const StorageIndex size = block.block_sizes()[tensor_to_block_dim_map[dim]];
       if (size == 1) {
         continue;
@@ -626,7 +626,7 @@ class TensorBlockMapper {
                     const TensorBlockShapeType block_shape,
                     Index min_target_size)
       : m_dimensions(dims),
-        m_block_dim_sizes(BlockDimensions(dims, block_shape, min_target_size)) {
+        m_block_dim_sizes(BlockDimensions(dims, block_shape, internal::convert_index<StorageIndex>(min_target_size))) {
     // Calculate block counts by dimension and total block count.
     DSizes<StorageIndex, NumDims> block_count;
     for (Index i = 0; i < block_count.rank(); ++i) {
@@ -717,8 +717,8 @@ class TensorBlockMapper {
  private:
   static Dimensions BlockDimensions(const Dimensions& tensor_dims,
                                     const TensorBlockShapeType block_shape,
-                                    Index min_target_size) {
-    min_target_size = numext::maxi<Index>(1, min_target_size);
+                                    StorageIndex min_target_size) {
+    min_target_size = numext::maxi<StorageIndex>(1, min_target_size);

     // If tensor fully fits into the target size, we'll treat it a single block.
     Dimensions block_dim_sizes = tensor_dims;
@@ -735,16 +735,15 @@ class TensorBlockMapper {
     if (block_shape == kUniformAllDims) {
       // Tensor will not fit within 'min_target_size' budget: calculate tensor
       // block dimension sizes based on "square" dimension size target.
-      const size_t dim_size_target = static_cast<size_t>(
+      const StorageIndex dim_size_target = internal::convert_index<StorageIndex>(
           std::pow(static_cast<float>(min_target_size),
                    1.0f / static_cast<float>(block_dim_sizes.rank())));
-      for (size_t i = 0; i < block_dim_sizes.rank(); ++i) {
+      for (Index i = 0; i < block_dim_sizes.rank(); ++i) {
         // TODO(andydavis) Adjust the inner most 'block_dim_size' to make it
         // a multiple of the packet size. Note that reducing
         // 'block_dim_size' in this manner can increase the number of
         // blocks, and so will amplify any per-block overhead.
-        block_dim_sizes[i] = numext::mini(
-            dim_size_target, static_cast<size_t>(tensor_dims[i]));
+        block_dim_sizes[i] = numext::mini(dim_size_target, tensor_dims[i]);
       }
       // Add any un-allocated coefficients to inner dimension(s).
       StorageIndex total_size = block_dim_sizes.TotalSize();
@@ -781,7 +780,7 @@ class TensorBlockMapper {
     eigen_assert(
         block_dim_sizes.TotalSize() >=
-        numext::mini<Index>(min_target_size, tensor_dims.TotalSize()));
+        numext::mini<StorageIndex>(min_target_size, tensor_dims.TotalSize()));

     return block_dim_sizes;
   }
@@ -824,7 +823,7 @@ class TensorSliceBlockMapper {
       m_total_block_count(1) {
     // Calculate block counts by dimension and total block count.
     DSizes<StorageIndex, NumDims> block_count;
-    for (size_t i = 0; i < block_count.rank(); ++i) {
+    for (Index i = 0; i < block_count.rank(); ++i) {
       block_count[i] = divup(m_tensor_slice_extents[i], m_block_dim_sizes[i]);
     }
     m_total_block_count = array_prod(block_count);
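Before the TensorDimensions.h portion of this patch, it is worth seeing what the kUniformAllDims sizing above actually computes: each block dimension targets roughly the rank-th root of the coefficient budget, clamped to the tensor's real extents. The standalone sketch below mirrors that logic with the patch's signed-index convention; UniformBlockDims is an illustration, not the TensorBlockMapper code itself.

  #include <cmath>
  #include <cstdio>
  #include <vector>

  typedef long Index;

  std::vector<Index> UniformBlockDims(const std::vector<Index>& dims,
                                      Index min_target_size) {
    const Index rank = static_cast<Index>(dims.size());
    // "Square" target: rank-th root of the coefficient budget.
    const Index dim_size_target = static_cast<Index>(
        std::pow(static_cast<float>(min_target_size),
                 1.0f / static_cast<float>(rank)));
    std::vector<Index> block(dims.size());
    for (Index i = 0; i < rank; ++i) {
      // All-signed min avoids the signed/unsigned comparisons the patch removes.
      block[i] = dim_size_target < dims[i] ? dim_size_target : dims[i];
    }
    return block;
  }

  int main() {
    Index d[] = {11, 5, 6, 17, 7};
    std::vector<Index> dims(d, d + 5);
    std::vector<Index> block = UniformBlockDims(dims, 5 * 5 * 5 * 5 * 5);
    for (std::size_t i = 0; i < block.size(); ++i)
      std::printf("%ld ", block[i]);
    std::printf("\n");  // expect roughly 5 5 5 5 5
    return 0;
  }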
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
index 4f973a5b7..ce91bc2a6 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
@@ -32,12 +32,12 @@ namespace Eigen {
 // Boilerplate code
 namespace internal {

-template<std::size_t n, typename Dimension> struct dget {
+template<std::ptrdiff_t n, typename Dimension> struct dget {
   static const std::ptrdiff_t value = get<n, Dimension>::value;
 };

-template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
+template<typename Index, std::ptrdiff_t NumIndices, std::ptrdiff_t n, bool RowMajor>
 struct fixed_size_tensor_index_linearization_helper
 {
   template <typename Dimensions> EIGEN_DEVICE_FUNC
@@ -50,7 +50,7 @@ struct fixed_size_tensor_index_linearization_helper
   }
 };

-template<typename Index, std::size_t NumIndices, bool RowMajor>
+template<typename Index, std::ptrdiff_t NumIndices, bool RowMajor>
 struct fixed_size_tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
 {
   template <typename Dimensions> EIGEN_DEVICE_FUNC
@@ -60,7 +60,7 @@ struct fixed_size_tensor_index_linearization_helper
   }
 };

-template <typename Index, std::size_t n>
+template <typename Index, std::ptrdiff_t n>
 struct fixed_size_tensor_index_extraction_helper
 {
   template <typename Dimensions> EIGEN_DEVICE_FUNC
@@ -94,7 +94,7 @@ struct Sizes {
   typedef internal::numeric_list<std::ptrdiff_t, Indices...> Base;
   const Base t = Base();
   static const std::ptrdiff_t total_size = internal::arg_prod(Indices...);
-  static const size_t count = Base::count;
+  static const ptrdiff_t count = Base::count;

   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t rank() const {
     return Base::count;
@@ -121,16 +121,16 @@ struct Sizes {
     return *this;
   }

-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t operator[] (const std::size_t index) const {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t operator[] (const std::ptrdiff_t index) const {
     return internal::fixed_size_tensor_index_extraction_helper<std::ptrdiff_t, Base::count>::run(index, t);
   }

   template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-  size_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
+  ptrdiff_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
     return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, false>::run(indices, t);
   }
   template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-  size_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
+  ptrdiff_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
     return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, true>::run(indices, t);
   }
 };
@@ -144,25 +144,25 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
 std::ptrdiff_t array_prod(const Sizes<Indices...>&) {
   return Sizes<Indices...>::total_size;
 }
 }

 #else

-template <std::size_t n>
+template <std::ptrdiff_t n>
 struct non_zero_size {
-  typedef internal::type2val<std::size_t, n> type;
+  typedef internal::type2val<std::ptrdiff_t, n> type;
 };
 template <> struct non_zero_size<0> {
   typedef internal::null_type type;
 };

-template <std::size_t V1=0, std::size_t V2=0, std::size_t V3=0, std::size_t V4=0, std::size_t V5=0> struct Sizes {
+template <std::ptrdiff_t V1=0, std::ptrdiff_t V2=0, std::ptrdiff_t V3=0, std::ptrdiff_t V4=0, std::ptrdiff_t V5=0> struct Sizes {
   typedef typename internal::make_type_list<typename non_zero_size<V1>::type,
                                             typename non_zero_size<V2>::type,
                                             typename non_zero_size<V3>::type,
                                             typename non_zero_size<V4>::type,
                                             typename non_zero_size<V5>::type >::type Base;
-  static const size_t count = Base::count;
-  static const std::size_t total_size = internal::arg_prod<Base>::value;
+  static const std::ptrdiff_t count = Base::count;
+  static const std::ptrdiff_t total_size = internal::arg_prod<Base>::value;

-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptrdiff_t rank() const {
     return count;
   }

-  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t TotalSize() {
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptrdiff_t TotalSize() {
     return internal::arg_prod<Base>::value;
   }
@@ -178,7 +178,7 @@ struct Sizes {
   template <typename... DenseIndex> Sizes(DenseIndex... /*indices*/) { }
-  explicit Sizes(std::initializer_list<std::size_t>) {
+  explicit Sizes(std::initializer_list<std::ptrdiff_t>) {
     // todo: add assertion
   }
 #else
@@ -213,18 +213,18 @@ struct Sizes {
   template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-  size_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
+  ptrdiff_t IndexOfColMajor(const array<DenseIndex, Base::count>& indices) const {
     return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, false>::run(indices, *reinterpret_cast<const Base*>(this));
   }
   template <typename DenseIndex> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-  size_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
+  ptrdiff_t IndexOfRowMajor(const array<DenseIndex, Base::count>& indices) const {
     return internal::fixed_size_tensor_index_linearization_helper<DenseIndex, Base::count, Base::count, true>::run(indices, *reinterpret_cast<const Base*>(this));
   }
 };

 namespace internal {
-template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
-EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_prod(const Sizes<V1, V2, V3, V4, V5>&) {
+template <std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes<V1, V2, V3, V4, V5>&) {
   return Sizes<V1, V2, V3, V4, V5>::total_size;
 }
 }
@@ -233,7 +233,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes
-template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
+template<typename Index, std::ptrdiff_t NumIndices, std::ptrdiff_t n, bool RowMajor>
 struct tensor_index_linearization_helper
 {
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -245,7 +245,7 @@ struct tensor_index_linearization_helper
   }
 };

-template<typename Index, std::size_t NumIndices, bool RowMajor>
+template<typename Index, std::ptrdiff_t NumIndices, bool RowMajor>
 struct tensor_index_linearization_helper<Index, NumIndices, 0, RowMajor>
 {
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -264,7 +264,7 @@ struct DSizes : array<DenseIndex, NumDims> {
   typedef array<DenseIndex, NumDims> Base;
   static const int count = NumDims;

-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t rank() const {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const {
     return NumDims;
   }
@@ -298,7 +298,7 @@ struct DSizes : array<DenseIndex, NumDims> {
     }
   }
 #else
-  template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
+  template <std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5>
   EIGEN_DEVICE_FUNC DSizes(const Sizes<V1, V2, V3, V4, V5>& a) {
     for (int i = 0 ; i < NumDims; ++i) {
       (*this)[i] = a[i];
@@ -359,7 +359,7 @@ struct DSizes : array<DenseIndex, NumDims> {
 // Boilerplate
 namespace internal {
-template<typename Index, std::size_t NumIndices, std::size_t n, bool RowMajor>
+template<typename Index, std::ptrdiff_t NumIndices, std::ptrdiff_t n, bool RowMajor>
 struct tensor_vsize_index_linearization_helper
 {
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -371,7 +371,7 @@ struct tensor_vsize_index_linearization_helper
   }
 };

-template<typename Index, std::size_t NumIndices, bool RowMajor>
+template<typename Index, std::ptrdiff_t NumIndices, bool RowMajor>
 struct tensor_vsize_index_linearization_helper<Index, NumIndices, 0, RowMajor>
 {
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -386,10 +386,10 @@ struct tensor_vsize_index_linearization_helper
 namespace internal {

 template <typename DenseIndex, int NumDims> struct array_size<const DSizes<DenseIndex, NumDims> > {
-  static const size_t value = NumDims;
+  static const ptrdiff_t value = NumDims;
 };
 template <typename DenseIndex, int NumDims> struct array_size<DSizes<DenseIndex, NumDims> > {
-  static const size_t value = NumDims;
+  static const ptrdiff_t value = NumDims;
 };
 #ifndef EIGEN_EMULATE_CXX11_META_H
 template <std::ptrdiff_t... Indices> struct array_size<const Sizes<Indices...> > {
@@ -399,33 +399,33 @@ template <std::ptrdiff_t... Indices> struct array_size<Sizes<Indices...> > {
   static const std::ptrdiff_t value = Sizes<Indices...>::count;
 };
 template <std::ptrdiff_t n, std::ptrdiff_t... Indices> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<Indices...>&) {
-  return get<n, internal::numeric_list<std::size_t, Indices...> >::value;
+  return get<n, internal::numeric_list<std::ptrdiff_t, Indices...> >::value;
 }
 template <std::ptrdiff_t n> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<>&) {
   eigen_assert(false && "should never be called");
   return -1;
 }
 #else
-template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> struct array_size<const Sizes<V1,V2,V3,V4,V5> > {
-  static const size_t value = Sizes<V1,V2,V3,V4,V5>::count;
+template <std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5> struct array_size<const Sizes<V1,V2,V3,V4,V5> > {
+  static const ptrdiff_t value = Sizes<V1,V2,V3,V4,V5>::count;
 };
-template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> struct array_size<Sizes<V1,V2,V3,V4,V5> > {
-  static const size_t value = Sizes<V1,V2,V3,V4,V5>::count;
+template <std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5> struct array_size<Sizes<V1,V2,V3,V4,V5> > {
+  static const ptrdiff_t value = Sizes<V1,V2,V3,V4,V5>::count;
 };
-template <std::size_t n, std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t array_get(const Sizes<V1,V2,V3,V4,V5>&) {
+template <std::ptrdiff_t n, std::ptrdiff_t V1, std::ptrdiff_t V2, std::ptrdiff_t V3, std::ptrdiff_t V4, std::ptrdiff_t V5> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<V1,V2,V3,V4,V5>&) {
   return get<n, typename Sizes<V1,V2,V3,V4,V5>::Base>::value;
 }
 #endif

-template <typename Dims1, typename Dims2, size_t n, size_t m>
+template <typename Dims1, typename Dims2, ptrdiff_t n, ptrdiff_t m>
 struct sizes_match_below_dim {
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1&, Dims2&) {
     return false;
   }
 };
-template <typename Dims1, typename Dims2, size_t n>
+template <typename Dims1, typename Dims2, ptrdiff_t n>
 struct sizes_match_below_dim<Dims1, Dims2, n, n> {
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1& dims1, Dims2& dims2) {
     return (array_get<n-1>(dims1) == array_get<n-1>(dims2)) &
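The fixed_size_tensor_index_linearization_helper recursion retyped above computes a plain strided linearization. Written iteratively, and using the ptrdiff_t indices the patch standardizes on, the column-major case reduces to the loop below (a sketch, not Eigen's implementation):

  #include <cstddef>
  #include <cstdio>

  std::ptrdiff_t IndexOfColMajor(const std::ptrdiff_t* indices,
                                 const std::ptrdiff_t* dims,
                                 std::ptrdiff_t rank) {
    std::ptrdiff_t linear = 0, stride = 1;
    for (std::ptrdiff_t i = 0; i < rank; ++i) {  // innermost dimension first
      linear += indices[i] * stride;
      stride *= dims[i];
    }
    return linear;
  }

  int main() {
    std::ptrdiff_t dims[] = {4, 3, 2};
    std::ptrdiff_t idx[]  = {1, 2, 1};  // 1 + 2*4 + 1*12 = 21
    std::printf("%ld\n", static_cast<long>(IndexOfColMajor(idx, dims, 3)));
    return 0;
  }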
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index 9b9587de5..b756be3b3 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -256,8 +256,8 @@ class TensorExecutor
                                 Evaluator::Layout> TensorBlockMapper;

       Evaluator evaluator(expr, device);
-      StorageIndex total_size = array_prod(evaluator.dimensions());
-      StorageIndex cache_size = device.firstLevelCacheSize() / sizeof(Scalar);
+      Index total_size = array_prod(evaluator.dimensions());
+      Index cache_size = device.firstLevelCacheSize() / sizeof(Scalar);
       if (total_size < cache_size) {
         // TODO(andydavis) Reduce block management overhead for small tensors.
         internal::TensorExecutor

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h b/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
@@ struct NumTraits

 namespace internal {
 template<typename T> EIGEN_DEVICE_FUNC void update_value(T& val, DenseIndex new_val) {
-  val = new_val;
+  val = internal::convert_index<T>(new_val);
 }
 template <DenseIndex n> EIGEN_DEVICE_FUNC void update_value(type2index<n>& val, DenseIndex new_val) {
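update_value now funnels assignments through internal::convert_index, making the index narrowing explicit instead of an implicit conversion that warns. Eigen's real convert_index lives in Core; the hypothetical stand-in below shows the idea of a checked narrowing cast:

  #include <cassert>

  template <typename To, typename From>
  To convert_index(const From& value) {
    To result = static_cast<To>(value);
    // Round-tripping proves the value was representable in To.
    assert(static_cast<From>(result) == value);
    return result;
  }

  int main() {
    long big = 123456;
    int small = convert_index<int>(big);  // fine: 123456 fits in int
    (void)small;
    return 0;
  }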
diff --git a/unsupported/test/cxx11_tensor_block_access.cpp b/unsupported/test/cxx11_tensor_block_access.cpp
index f572e496d..24a95ab12 100644
--- a/unsupported/test/cxx11_tensor_block_access.cpp
+++ b/unsupported/test/cxx11_tensor_block_access.cpp
@@ -33,8 +33,8 @@ static internal::TensorBlockShapeType RandomShape() {
 }

 template <int NumDims>
-static std::size_t RandomTargetSize(const DSizes<Index, NumDims>& dims) {
-  return internal::random<size_t>(1, dims.TotalSize());
+static Index RandomTargetSize(const DSizes<Index, NumDims>& dims) {
+  return internal::random<Index>(1, dims.TotalSize());
 }

 template <int NumDims>
@@ -178,7 +178,7 @@ static void test_block_mapper_maps_every_element() {
   // Verify that every coefficient in the original Tensor is accessible through
   // TensorBlock only once.
   Index total_coeffs = dims.TotalSize();
-  VERIFY_IS_EQUAL(coeff_set.size(), total_coeffs);
+  VERIFY_IS_EQUAL(Index(coeff_set.size()), total_coeffs);
   VERIFY_IS_EQUAL(*coeff_set.begin(), 0);
   VERIFY_IS_EQUAL(*coeff_set.rbegin(), total_coeffs - 1);
 }
@@ -208,7 +208,7 @@ static void test_slice_block_mapper_maps_every_element() {
   // Pick a random dimension sizes for the tensor blocks.
   DSizes<Index, NumDims> block_sizes;
   for (int i = 0; i < NumDims; ++i) {
-    block_sizes[i] = internal::random<size_t>(1, tensor_slice_extents[i]);
+    block_sizes[i] = internal::random<Index>(1, tensor_slice_extents[i]);
   }

   TensorSliceBlockMapper block_mapper(tensor_dims, tensor_slice_offsets,
@@ -222,7 +222,7 @@ static void test_slice_block_mapper_maps_every_element() {
                             &coeff_set);
   }

-  VERIFY_IS_EQUAL(coeff_set.size(), total_coeffs);
+  VERIFY_IS_EQUAL(Index(coeff_set.size()), total_coeffs);
 }

@@ -262,14 +262,14 @@ static void test_block_io_copy_data_from_source_to_target() {
 }

 template <int Layout, int NumDims>
-static int GetInputIndex(Index output_index,
+static Index GetInputIndex(Index output_index,
                          const array<Index, NumDims>& output_to_input_dim_map,
                          const array<Index, NumDims>& input_strides,
                          const array<Index, NumDims>& output_strides) {
   int input_index = 0;
   if (Layout == ColMajor) {
     for (int i = NumDims - 1; i > 0; --i) {
-      const int idx = output_index / output_strides[i];
+      const Index idx = output_index / output_strides[i];
       input_index += idx * input_strides[output_to_input_dim_map[i]];
       output_index -= idx * output_strides[i];
     }
@@ -277,7 +277,7 @@ static int GetInputIndex(Index output_index,
     return input_index +
            output_index * input_strides[output_to_input_dim_map[0]];
   } else {
     for (int i = 0; i < NumDims - 1; ++i) {
-      const int idx = output_index / output_strides[i];
+      const Index idx = output_index / output_strides[i];
       input_index += idx * input_strides[output_to_input_dim_map[i]];
       output_index -= idx * output_strides[i];
     }
@@ -650,7 +650,7 @@ static void test_uniform_block_shape()
 {
   // Test shape 'UniformAllDims' with uniform 'max_coeff count'.
   DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-  const size_t max_coeff_count = 5 * 5 * 5 * 5 * 5;
+  const Index max_coeff_count = 5 * 5 * 5 * 5 * 5;
   TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                  max_coeff_count);
   TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -664,7 +664,7 @@ static void test_uniform_block_shape()
   // partially into first inner-most dimension.
   if (Layout == ColMajor) {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 7 * 5 * 5 * 5 * 5;
+    const Index max_coeff_count = 7 * 5 * 5 * 5 * 5;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 5 * 5 * 5 * 5 * 6;
+    const Index max_coeff_count = 5 * 5 * 5 * 5 * 6;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -690,7 +690,7 @@ static void test_uniform_block_shape()
   // fully into first inner-most dimension.
   if (Layout == ColMajor) {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 11 * 5 * 5 * 5 * 5;
+    const Index max_coeff_count = 11 * 5 * 5 * 5 * 5;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 5 * 5 * 5 * 5 * 7;
+    const Index max_coeff_count = 5 * 5 * 5 * 5 * 7;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -716,7 +716,7 @@ static void test_uniform_block_shape()
   // fully into first few inner-most dimensions.
   if (Layout == ColMajor) {
     DSizes<Index, 5> dims(7, 5, 6, 17, 7);
-    const size_t max_coeff_count = 7 * 5 * 6 * 7 * 5;
+    const Index max_coeff_count = 7 * 5 * 6 * 7 * 5;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes<Index, 5> dims(7, 5, 6, 9, 7);
-    const size_t max_coeff_count = 5 * 5 * 5 * 6 * 7;
+    const Index max_coeff_count = 5 * 5 * 5 * 6 * 7;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -743,7 +743,7 @@ static void test_uniform_block_shape()
   // Test shape 'UniformAllDims' with full allocation to all dims.
   if (Layout == ColMajor) {
     DSizes<Index, 5> dims(7, 5, 6, 17, 7);
-    const size_t max_coeff_count = 7 * 5 * 6 * 17 * 7;
+    const Index max_coeff_count = 7 * 5 * 6 * 17 * 7;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes<Index, 5> dims(7, 5, 6, 9, 7);
-    const size_t max_coeff_count = 7 * 5 * 6 * 9 * 7;
+    const Index max_coeff_count = 7 * 5 * 6 * 9 * 7;
     TensorBlockMapper block_mapper(dims, internal::kUniformAllDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -777,7 +777,7 @@ static void test_skewed_inner_dim_block_shape()
   // Test shape 'SkewedInnerDims' with partial allocation to inner-most dim.
   if (Layout == ColMajor) {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 10 * 1 * 1 * 1 * 1;
+    const Index max_coeff_count = 10 * 1 * 1 * 1 * 1;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 1 * 1 * 1 * 1 * 6;
+    const Index max_coeff_count = 1 * 1 * 1 * 1 * 6;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -802,7 +802,7 @@ static void test_skewed_inner_dim_block_shape()
   // Test shape 'SkewedInnerDims' with full allocation to inner-most dim.
   if (Layout == ColMajor) {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 11 * 1 * 1 * 1 * 1;
+    const Index max_coeff_count = 11 * 1 * 1 * 1 * 1;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 1 * 1 * 1 * 1 * 7;
+    const Index max_coeff_count = 1 * 1 * 1 * 1 * 7;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -828,7 +828,7 @@ static void test_skewed_inner_dim_block_shape()
   // and partial allocation to second inner-dim.
   if (Layout == ColMajor) {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 11 * 3 * 1 * 1 * 1;
+    const Index max_coeff_count = 11 * 3 * 1 * 1 * 1;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 1 * 1 * 1 * 15 * 7;
+    const Index max_coeff_count = 1 * 1 * 1 * 15 * 7;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -856,7 +856,7 @@ static void test_skewed_inner_dim_block_shape()
   // and partial allocation to third inner-dim.
   if (Layout == ColMajor) {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 11 * 5 * 5 * 1 * 1;
+    const Index max_coeff_count = 11 * 5 * 5 * 1 * 1;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 1 * 1 * 5 * 17 * 7;
+    const Index max_coeff_count = 1 * 1 * 5 * 17 * 7;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
@@ -885,7 +885,7 @@ static void test_skewed_inner_dim_block_shape()
   // Test shape 'SkewedInnerDims' with full allocation to all dims.
   if (Layout == ColMajor) {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 11 * 5 * 6 * 17 * 7;
+    const Index max_coeff_count = 11 * 5 * 6 * 17 * 7;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);
     VERIFY(block.block_sizes().TotalSize() <= max_coeff_count);
   } else {
     DSizes<Index, 5> dims(11, 5, 6, 17, 7);
-    const size_t max_coeff_count = 11 * 5 * 6 * 17 * 7;
+    const Index max_coeff_count = 11 * 5 * 6 * 17 * 7;
     TensorBlockMapper block_mapper(dims, internal::kSkewedInnerDims,
                                    max_coeff_count);
     TensorBlock block = block_mapper.GetBlockForIndex(0, NULL);

diff --git a/unsupported/test/cxx11_tensor_thread_pool.cpp b/unsupported/test/cxx11_tensor_thread_pool.cpp
index 7606b0abf..6d8e58214 100644
--- a/unsupported/test/cxx11_tensor_thread_pool.cpp
+++ b/unsupported/test/cxx11_tensor_thread_pool.cpp
@@ -300,7 +300,7 @@ static void test_multithread_contraction_with_output_kernel() {
   m_result = m_left * m_right;

-  for (size_t i = 0; i < t_result.dimensions().TotalSize(); i++) {
+  for (Index i = 0; i < t_result.dimensions().TotalSize(); i++) {
     VERIFY(&t_result.data()[i] != &m_result.data()[i]);
     VERIFY_IS_APPROX(t_result.data()[i], std::sqrt(m_result.data()[i]));
   }
--
cgit v1.2.3
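Taken together, the test changes in this last patch silence one recurring warning family: comparing Eigen's signed Index against the unsigned size() of a standard container. The minimal repro below shows the warning-prone form and the patched form side by side; compile with -Wsign-compare to see the difference.

  #include <cstdio>
  #include <set>

  typedef long Index;

  int main() {
    std::set<Index> coeff_set;
    for (Index i = 0; i < 10; ++i) coeff_set.insert(i);
    Index total_coeffs = 10;

    // if (coeff_set.size() == total_coeffs) {}  // -Wsign-compare fires here
    if (Index(coeff_set.size()) == total_coeffs) {  // patched form: cast first
      std::printf("all %ld coefficients visited exactly once\n",
                  static_cast<long>(total_coeffs));
    }
    return 0;
  }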