diff options
author | Benoit Steiner <benoit.steiner.goog@gmail.com> | 2015-02-25 22:22:37 -0800 |
---|---|---|
committer | Benoit Steiner <benoit.steiner.goog@gmail.com> | 2015-02-25 22:22:37 -0800 |
commit | f8fbb3f9a642371d3798e8d6e9638d878e00d945 (patch) | |
tree | 72735cc8eadc08298f69052f464c9f14ff4a1063 /unsupported/Eigen | |
parent | 8e817b65d02e6f6c2d4a0d0085212db0abe6c485 (diff) |
Fixed several compilation warnings reported by clang
Diffstat (limited to 'unsupported/Eigen')
5 files changed, 14 insertions, 17 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h index 5790e19d6..055a7d407 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h @@ -249,7 +249,7 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device> innermostLoc = index; } else { if (internal::index_statically_eq<InputDimensions>()(0, 1)) { - eigen_assert(innermostLoc % m_impl.dimensions()[0] == 0); + eigen_assert(index % m_impl.dimensions()[0] == 0); innermostLoc = 0; } else { innermostLoc = index % m_impl.dimensions()[0]; @@ -302,7 +302,7 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device> innermostLoc = index; } else { if (internal::index_statically_eq<InputDimensions>()(NumDims-1, 1)) { - eigen_assert(innermostLoc % m_impl.dimensions()[NumDims-1] == 0); + eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0); innermostLoc = 0; } else { innermostLoc = index % m_impl.dimensions()[NumDims-1];
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h index 8b87f1045..9259c864e 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h @@ -174,8 +174,6 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT OutputMapper output(buffer, m); - LhsPacker pack_lhs; - // compute block sizes (which depend on number of threads) const Index num_threads = this->m_device.numThreads(); Index mc = m; @@ -190,8 +188,8 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT const Index k_blocks = CEIL_DIV(k, kc); const Index n_blocks = CEIL_DIV(n, nc); const Index m_blocks = CEIL_DIV(m, mc); - const int sizeA = mc * kc; - const int sizeB = kc * nc; + const Index sizeA = mc * kc; + const Index sizeB = kc * nc; /* cout << "m: " << m << " n: " << n << " k: " << k << endl; cout << "mc: " << mc << " nc: " << nc << " kc: " << kc << endl; @@ -228,7 +226,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT const Index num_kernel_promises = num_threads * n_blocks; std::vector<Promise> kernel_promises(num_kernel_promises); std::vector<Future> kernel_futures(num_kernel_promises); - for (int i = 0; i < kernel_promises.size(); ++i) { + for (std::size_t i = 0; i < kernel_promises.size(); ++i) { kernel_promises[i].set_value(); kernel_futures[i] = kernel_promises[i].get_future(); } @@ -239,16 +237,16 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT const Index actual_kc = (std::min)(k_start + kc, k) - k_start; for (Index m_block_idx = 0; m_block_idx < m_blocks; m_block_idx += numBlockAs) { - const int num_blocks = (std::min)(m_blocks-m_block_idx, numBlockAs); + const Index num_blocks = (std::min)(m_blocks-m_block_idx, numBlockAs); for (Index mt_block_idx = m_block_idx; mt_block_idx < m_block_idx+num_blocks; mt_block_idx++) { const Index m_start = mt_block_idx * mc; const Index actual_mc = (std::min)(m_start + mc, m) - m_start; eigen_assert(actual_mc > 0); - int blockAId = (k_block_idx * m_blocks + mt_block_idx) % num_threads; + Index blockAId = (k_block_idx * m_blocks + mt_block_idx) % num_threads; for (int i = 0; i < n_blocks; ++i) { - int future_id = (blockAId * n_blocks + i); + Index future_id = (blockAId * n_blocks + i); wait_until_ready(&kernel_futures[future_id]); kernel_promises[future_id] = Promise(); kernel_futures[future_id] = kernel_promises[future_id].get_future(); @@ -277,9 +275,9 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT // first make sure the previous kernels are all done before overwriting rhs. Also wait if // we're going to start new k. In both cases need_to_pack is true.
if (need_to_pack) { - for (int i = num_blocks; i < num_threads; ++i) { - int blockAId = (k_block_idx * m_blocks + i + m_block_idx) % num_threads; - int future_id = (blockAId * n_blocks + n_block_idx); + for (Index i = num_blocks; i < num_threads; ++i) { + Index blockAId = (k_block_idx * m_blocks + i + m_block_idx) % num_threads; + Index future_id = (blockAId * n_blocks + n_block_idx); wait_until_ready(&kernel_futures[future_id]); } } @@ -361,7 +359,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT for (Index mt_block_idx = 0; mt_block_idx < arg.num_blockAs; mt_block_idx++) { const Index m_base_start = arg.m + arg.mc*mt_block_idx; if (m_base_start < arg.max_m) { - int blockAId = (arg.k_block_idx * arg.m_blocks + mt_block_idx + arg.m_block_idx) % arg.num_threads; + Index blockAId = (arg.k_block_idx * arg.m_blocks + mt_block_idx + arg.m_block_idx) % arg.num_threads; wait_until_ready(&(*arg.lhs_futures)[blockAId]); const Index actual_mc = (std::min)(m_base_start + arg.mc, arg.max_m) - m_base_start;
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h index 2ad52b2f9..5e805fd95 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h @@ -230,7 +230,7 @@ struct DSizes : array<DenseIndex, NumDims> { } EIGEN_DEVICE_FUNC DSizes() { - for (int i = 0 ; i < NumDims; ++i) { + for (std::size_t i = 0 ; i < NumDims; ++i) { (*this)[i] = 0; } }
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h index 05ac9bd2f..1eb37bf1c 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h @@ -131,7 +131,6 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable> const Index blocksize = std::max<Index>(PacketSize, (blocksz - (blocksz % PacketSize))); const Index numblocks = size / blocksize; - Index i = 0; std::vector<Future> results; results.reserve(numblocks); for (int i = 0; i < numblocks; ++i) {
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h index a93f48ccb..01ba0a80f 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h @@ -302,7 +302,7 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_impl(op.expression(), device), m_device(device), m_dimensions(op.sizes()), m_offsets(op.startIndices()) { - for (int i = 0; i < internal::array_size<Dimensions>::value; ++i) { + for (std::size_t i = 0; i < internal::array_size<Dimensions>::value; ++i) { eigen_assert(m_impl.dimensions()[i] >= op.sizes()[i] + op.startIndices()[i]); }