diff options
author | Benoit Steiner <benoit.steiner.goog@gmail.com> | 2015-12-17 13:39:01 -0800 |
---|---|---|
committer | Benoit Steiner <benoit.steiner.goog@gmail.com> | 2015-12-17 13:39:01 -0800 |
commit | 4aac55f684d9bd36b5f855fa5a8c2f17ca3094c9 (patch) | |
tree | 7d70c007a42527cf5e8ddcf6ae26161b7df1d5b5 /unsupported/Eigen | |
parent | 40e6250fc3737ff76224b04c94c2de3ce0d51607 (diff) |
Silenced some compilation warnings triggered by nvcc
Diffstat (limited to 'unsupported/Eigen')
-rw-r--r-- | unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h | 7 | ||||
-rw-r--r-- | unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h | 16 | ||||
-rw-r--r-- | unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h | 6 |
3 files changed, 20 insertions, 9 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h index c76d1ee3f..4d7570077 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h @@ -238,11 +238,14 @@ struct GpuDevice { }; - +#ifndef __CUDA_ARCH__ #define LAUNCH_CUDA_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \ (kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \ assert(cudaGetLastError() == cudaSuccess); - +#else +#define LAUNCH_CUDA_KERNEL(...) \ + eigen_assert(false && "Cannot launch a kernel from another kernel"); +#endif // FIXME: Should be device and kernel specific. #ifdef __CUDACC__
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h index d93e1de1b..c28078882 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h @@ -156,14 +156,14 @@ template <typename Expression> class TensorExecutor<Expression, GpuDevice, false> { public: typedef typename Expression::Index Index; - static void run(const Expression& expr, const GpuDevice& device); + EIGEN_DEVICE_FUNC static void run(const Expression& expr, const GpuDevice& device); }; template <typename Expression> class TensorExecutor<Expression, GpuDevice, true> { public: typedef typename Expression::Index Index; - static void run(const Expression& expr, const GpuDevice& device); + EIGEN_DEVICE_FUNC static void run(const Expression& expr, const GpuDevice& device); }; #if defined(__CUDACC__) @@ -213,8 +213,9 @@ EigenMetaKernel_Vectorizable(Evaluator memcopied_eval, Index size) { /*static*/ template <typename Expression> -inline void TensorExecutor<Expression, GpuDevice, false>::run(const Expression& expr, const GpuDevice& device) +EIGEN_DEVICE_FUNC inline void TensorExecutor<Expression, GpuDevice, false>::run(const Expression& expr, const GpuDevice& device) { +#ifndef __CUDA_ARCH__ TensorEvaluator<Expression, GpuDevice> evaluator(expr, device); const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); if (needs_assign) @@ -227,13 +228,17 @@ inline void TensorExecutor<Expression, GpuDevice, false>::run(const Expression& LAUNCH_CUDA_KERNEL((EigenMetaKernel_NonVectorizable<TensorEvaluator<Expression, GpuDevice>, Index>), num_blocks, block_size, 0, device, evaluator, size); } evaluator.cleanup(); +#else + eigen_assert(false && "Cannot launch a kernel from another kernel"); +#endif } /*static*/ template<typename Expression> -inline void TensorExecutor<Expression, GpuDevice, true>::run(const Expression& expr, const GpuDevice& device) +EIGEN_DEVICE_FUNC inline void TensorExecutor<Expression, GpuDevice, true>::run(const Expression& expr, const GpuDevice& device) { +#ifndef __CUDA_ARCH__ TensorEvaluator<Expression, GpuDevice> evaluator(expr, device); const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); if (needs_assign) @@ -246,6 +251,9 @@ inline void TensorExecutor<Expression, GpuDevice, true>::run(const Expression& e LAUNCH_CUDA_KERNEL((EigenMetaKernel_Vectorizable<TensorEvaluator<Expression, GpuDevice>, Index>), num_blocks, block_size, 0, device, evaluator, size); } evaluator.cleanup(); +#else + eigen_assert(false && "Cannot launch a kernel from another kernel"); +#endif } #endif // __CUDACC__
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h index bd15295b8..aaa877185 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h @@ -454,7 +454,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device> input_strides[i] = input_strides[i + 1] * input_dims[i + 1]; } } - + int outputIndex = 0; int reduceIndex = 0; for (int i = 0; i < NumInputDims; ++i) { @@ -473,13 +473,13 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device> m_preservedStrides[0] = internal::array_prod(input_dims); } } - + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType; typedef typename internal::remove_const<typename XprType::PacketReturnType>::type PacketReturnType; - EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) { m_impl.evalSubExprsIfNeeded(NULL); // Use the FullReducer if possible.