author     Benoit Steiner <benoit.steiner.goog@gmail.com>  2016-03-28 09:21:04 -0700
committer  Benoit Steiner <benoit.steiner.goog@gmail.com>  2016-03-28 09:21:04 -0700
commit     1bc81f78895effe972ef8df5a138d267a74295fb (patch)
tree       04d21608bd912022c807b2bdd3074a0b4e69c89a /unsupported/Eigen/CXX11/src
parent     78f83d6f6aa873b9dc128e83c4fc63d0f384fac1 (diff)
Fixed compilation warnings on arm
Diffstat (limited to 'unsupported/Eigen/CXX11/src')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index 9875601ba..00f870328 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -254,7 +254,7 @@ struct FullReducer<Self, Op, ThreadPoolDevice, false> {
} else {
const Index blocksize = std::floor<Index>(static_cast<float>(num_coeffs) / num_threads);
const unsigned int numblocks = blocksize > 0 ? static_cast<unsigned int>(num_coeffs / blocksize) : 0;
- eigen_assert(num_coeffs >= numblocks * blocksize);
+ eigen_assert(num_coeffs >= static_cast<Index>(numblocks) * blocksize);
Barrier barrier(numblocks);
MaxSizeVector<typename Self::CoeffReturnType> shards(numblocks, reducer.initialize());
@@ -264,7 +264,7 @@ struct FullReducer<Self, Op, ThreadPoolDevice, false> {
}
typename Self::CoeffReturnType finalShard;
- if (numblocks * blocksize < num_coeffs) {
+ if (static_cast<Index>(numblocks) * blocksize < num_coeffs) {
finalShard = InnerMostDimReducer<Self, Op, false>::reduce(
self, numblocks * blocksize, num_coeffs - numblocks * blocksize, reducer);
} else {
@@ -301,7 +301,7 @@ struct FullReducer<Self, Op, ThreadPoolDevice, true> {
}
const Index blocksize = std::floor<Index>(static_cast<float>(num_coeffs) / num_threads);
const unsigned int numblocks = blocksize > 0 ? static_cast<unsigned int>(num_coeffs / blocksize) : 0;
- eigen_assert(num_coeffs >= numblocks * blocksize);
+ eigen_assert(num_coeffs >= static_cast<Index>(numblocks) * blocksize);
Barrier barrier(numblocks);
MaxSizeVector<typename Self::CoeffReturnType> shards(numblocks, reducer.initialize());
@@ -311,7 +311,7 @@ struct FullReducer<Self, Op, ThreadPoolDevice, true> {
&shards[i]);
}
typename Self::CoeffReturnType finalShard;
- if (numblocks * blocksize < num_coeffs) {
+ if (static_cast<Index>(numblocks) * blocksize < num_coeffs) {
finalShard = InnerMostDimReducer<Self, Op, true>::reduce(
self, numblocks * blocksize, num_coeffs - numblocks * blocksize, reducer);
} else {
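
For context: the warning this patch silences comes from multiplying the unsigned int numblocks by the signed Index blocksize and comparing the product against the signed num_coeffs. On targets where unsigned int is as wide as Index (presumably the 32-bit ARM toolchain here), that mixed multiplication is carried out in unsigned arithmetic, so the comparison pairs signed and unsigned operands and the compiler warns. Casting numblocks to Index first keeps the whole expression signed. Below is a minimal standalone sketch of the pattern, not Eigen code; the variable names simply mirror the ones in the hunks above.

    #include <cassert>
    #include <cstddef>

    typedef std::ptrdiff_t Index;  // stand-in for Eigen's signed Index type

    int main() {
      const Index num_coeffs = 1000;
      const Index blocksize = 128;
      const unsigned int numblocks =
          blocksize > 0 ? static_cast<unsigned int>(num_coeffs / blocksize) : 0;

      // Pre-patch form: numblocks * blocksize mixes unsigned and signed operands.
      // When unsigned int is as wide as Index (e.g. a 32-bit build), the product
      // is unsigned, and comparing it against the signed num_coeffs can trigger
      // a sign-compare warning:
      //   assert(num_coeffs >= numblocks * blocksize);

      // Post-patch form: cast numblocks to Index so the multiplication and the
      // comparison are performed entirely in signed arithmetic.
      assert(num_coeffs >= static_cast<Index>(numblocks) * blocksize);
      return 0;
    }

The patch applies the same cast at all four sites in TensorReduction.h: the eigen_assert and the final-shard size check in both the non-vectorized and vectorized ThreadPoolDevice FullReducer specializations.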