author     2015-02-28 02:32:46 -0800
committer  2015-02-28 02:32:46 -0800
commit     bb483313f6a9c69e15ad5e668368eb5f80adf8f7 (patch)
tree       e0e4f71ad3374cc903f172b2b2b3a33f29fa4c61 /unsupported/Eigen
parent     fb53384b0f8e3d2ef6e7ec9a4e51c27142ef880c (diff)
Fixed another batch of compilation warnings
Diffstat (limited to 'unsupported/Eigen')
4 files changed, 15 insertions, 15 deletions
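Note on the changes below: every hunk silences a signed/unsigned comparison warning (-Wsign-compare under g++/clang++ with -Wall). Depending on which side is more natural to change, either the loop counter is given the unsigned type of its bound, or a size_t template parameter is narrowed to a signed type so it mixes cleanly with signed index arithmetic. A minimal standalone sketch of the loop-counter case (illustrative names, not Eigen code):

    #include <cstddef>
    #include <vector>

    int main() {
      std::vector<int> blocks(4, 0);
      // Before: "comparison between signed and unsigned integer expressions"
      //   for (int i = 0; i < blocks.size(); i++) { ... }
      // After: give the counter the same (unsigned) type as the bound,
      // as the loops in TensorContractionThreadPool.h now do.
      for (std::size_t i = 0; i < blocks.size(); i++) {
        blocks[i] += 1;
      }
      return 0;
    }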
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index f7254a24d..84f2f715c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -32,7 +32,7 @@ enum {
 template<typename Scalar, typename Index, int side,
          typename Tensor,
          typename nocontract_t, typename contract_t,
-         size_t packet_size, bool inner_dim_contiguous>
+         int packet_size, bool inner_dim_contiguous>
 class BaseTensorContractionMapper {
  public:
   EIGEN_DEVICE_FUNC
@@ -162,14 +162,14 @@ class BaseTensorContractionMapper {
 template<typename Scalar, typename Index, int side,
          typename Tensor,
          typename nocontract_t, typename contract_t,
-         size_t packet_size,
+         int packet_size,
          bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment>
 class TensorContractionInputMapper;
 
 template<typename Scalar, typename Index, int side,
          typename Tensor,
          typename nocontract_t, typename contract_t,
-         size_t packet_size,
+         int packet_size,
          bool inner_dim_contiguous, bool inner_dim_reordered, int Alignment>
 class TensorContractionSubMapper {
  public:
@@ -231,7 +231,7 @@ class TensorContractionSubMapper {
 template<typename Scalar, typename Index, int side,
          typename Tensor,
          typename nocontract_t, typename contract_t,
-         size_t packet_size = (Tensor::PacketAccess ? packet_traits<Scalar>::size : 1),
+         int packet_size = (Tensor::PacketAccess ? packet_traits<Scalar>::size : 1),
          bool inner_dim_contiguous = false, bool inner_dim_reordered = (side != Lhs), int Alignment=Unaligned>
 class TensorContractionInputMapper
     : public BaseTensorContractionMapper<Scalar, Index, side, Tensor, nocontract_t, contract_t, packet_size, inner_dim_contiguous> {
@@ -591,7 +591,7 @@ struct TensorContractionEvaluatorBase
     // dimensions and right non-contracting dimensions.
     m_lhs_inner_dim_contiguous = true;
     int dim_idx = 0;
-    int nocontract_idx = 0;
+    unsigned int nocontract_idx = 0;
 
     for (int i = 0; i < LDims; i++) {
       // find if we are contracting on index i of left tensor
@@ -651,7 +651,7 @@ struct TensorContractionEvaluatorBase
     // each tensor, we'll only look at the first tensor here.
     m_rhs_inner_dim_contiguous = true;
     m_rhs_inner_dim_reordered = false;
-    for (int i = 0; i < ContractDims; i++) {
+    for (unsigned int i = 0; i < ContractDims; i++) {
       Index left = eval_op_indices[i].first;
       Index right = eval_op_indices[i].second;
 
@@ -751,8 +751,8 @@ struct TensorContractionEvaluatorBase
     typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
     typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
     typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
-    const int lhs_packet_size = internal::packet_traits<LhsScalar>::size;
-    const int rhs_packet_size = internal::packet_traits<RhsScalar>::size;
+    const Index lhs_packet_size = internal::packet_traits<LhsScalar>::size;
+    const Index rhs_packet_size = internal::packet_traits<RhsScalar>::size;
     typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
                                                    LeftEvaluator, left_nocontract_t,
                                                    contract_t, lhs_packet_size,
@@ -916,8 +916,8 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
     typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
     typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
 
-    const int lhs_packet_size = internal::packet_traits<LhsScalar>::size;
-    const int rhs_packet_size = internal::packet_traits<RhsScalar>::size;
+    const Index lhs_packet_size = internal::packet_traits<LhsScalar>::size;
+    const Index rhs_packet_size = internal::packet_traits<RhsScalar>::size;
 
     typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs,
                                                    LeftEvaluator, left_nocontract_t,
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 9259c864e..cb2fd53fe 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -312,15 +312,15 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
     }
 
     // Make sure all the kernels are done.
-    for (int i = 0; i < kernel_futures.size(); ++i) {
+    for (size_t i = 0; i < kernel_futures.size(); ++i) {
       wait_until_ready(&kernel_futures[i]);
     }
 
     // deallocate all of the memory for both A and B's
-    for (int i = 0; i < blockAs.size(); i++) {
+    for (size_t i = 0; i < blockAs.size(); i++) {
       this->m_device.deallocate(blockAs[i]);
     }
 
-    for (int i = 0; i < blockBs.size(); i++) {
+    for (size_t i = 0; i < blockBs.size(); i++) {
       this->m_device.deallocate(blockBs[i]);
     }
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h b/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h
index 11c7ce443..b85f45d90 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h
@@ -73,7 +73,7 @@ struct TensorIntDivisor {
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(const T numerator) const {
     const int N = 32;
     eigen_assert(numerator >= 0);
-    eigen_assert(numerator <= (1ull<<N) - 1);
+    eigen_assert(numerator <= static_cast<T>(1ull<<N) - 1);
 
     uint32_t t1 = (multiplier * numerator) >> 32;
     uint32_t t = (static_cast<uint32_t>(numerator) - t1) >> shift1;
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
index 01ba0a80f..90ac7b6a8 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
@@ -364,7 +364,7 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
       }
     }
     // Use memcpy if it's going to be faster than using the regular evaluation.
-    if (contiguous_values > 2 * m_device.numThreads()) {
+    if (contiguous_values > static_cast<Index>(2 * m_device.numThreads())) {
      Scalar* src = m_impl.data();
      for (int i = 0; i < internal::array_prod(dimensions()); i += contiguous_values) {
        Index offset = srcCoeff(i);
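The TensorIntDiv.h and TensorMorphing.h hunks use the other idiom for the same warning: when the counter or variable type cannot change, cast one side of the comparison so both operands share a type, as in static_cast<Index>(2 * m_device.numThreads()). A hedged standalone sketch of the assert fix, assuming a 64-bit index type so casting (1ull << 32) to the signed type is value-preserving (names are illustrative, not Eigen's):

    #include <cassert>
    #include <cstdint>

    template <typename T>
    T checked(T numerator) {
      const int N = 32;
      // (1ull << N) - 1 has type unsigned long long; comparing a signed T
      // against it triggers -Wsign-compare. Casting to T first makes the
      // comparison homogeneous, and loses nothing while T can hold 2^N
      // (true here for T = int64_t, N = 32).
      assert(numerator >= 0);
      assert(numerator <= static_cast<T>(1ull << N) - 1);
      return numerator;
    }

    int main() {
      std::int64_t v = checked<std::int64_t>(42);
      return v == 42 ? 0 : 1;
    }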