path: root/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
author Benoit Steiner <benoit.steiner.goog@gmail.com> 2015-12-17 13:39:01 -0800
committer Benoit Steiner <benoit.steiner.goog@gmail.com> 2015-12-17 13:39:01 -0800
commit 4aac55f684d9bd36b5f855fa5a8c2f17ca3094c9 (patch)
tree 7d70c007a42527cf5e8ddcf6ae26161b7df1d5b5 /unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
parent 40e6250fc3737ff76224b04c94c2de3ce0d51607 (diff)
Silenced some compilation warnings triggered by nvcc
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h | 16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index d93e1de1b..c28078882 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -156,14 +156,14 @@ template <typename Expression>
class TensorExecutor<Expression, GpuDevice, false> {
public:
typedef typename Expression::Index Index;
- static void run(const Expression& expr, const GpuDevice& device);
+ EIGEN_DEVICE_FUNC static void run(const Expression& expr, const GpuDevice& device);
};
template <typename Expression>
class TensorExecutor<Expression, GpuDevice, true> {
public:
typedef typename Expression::Index Index;
- static void run(const Expression& expr, const GpuDevice& device);
+ EIGEN_DEVICE_FUNC static void run(const Expression& expr, const GpuDevice& device);
};
#if defined(__CUDACC__)
@@ -213,8 +213,9 @@ EigenMetaKernel_Vectorizable(Evaluator memcopied_eval, Index size) {
/*static*/
template <typename Expression>
-inline void TensorExecutor<Expression, GpuDevice, false>::run(const Expression& expr, const GpuDevice& device)
+EIGEN_DEVICE_FUNC inline void TensorExecutor<Expression, GpuDevice, false>::run(const Expression& expr, const GpuDevice& device)
{
+#ifndef __CUDA_ARCH__
TensorEvaluator<Expression, GpuDevice> evaluator(expr, device);
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
if (needs_assign)
@@ -227,13 +228,17 @@ inline void TensorExecutor<Expression, GpuDevice, false>::run(const Expression&
LAUNCH_CUDA_KERNEL((EigenMetaKernel_NonVectorizable<TensorEvaluator<Expression, GpuDevice>, Index>), num_blocks, block_size, 0, device, evaluator, size);
}
evaluator.cleanup();
+#else
+ eigen_assert(false && "Cannot launch a kernel from another kernel");
+#endif
}
/*static*/
template<typename Expression>
-inline void TensorExecutor<Expression, GpuDevice, true>::run(const Expression& expr, const GpuDevice& device)
+EIGEN_DEVICE_FUNC inline void TensorExecutor<Expression, GpuDevice, true>::run(const Expression& expr, const GpuDevice& device)
{
+#ifndef __CUDA_ARCH__
TensorEvaluator<Expression, GpuDevice> evaluator(expr, device);
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
if (needs_assign)
@@ -246,6 +251,9 @@ inline void TensorExecutor<Expression, GpuDevice, true>::run(const Expression& e
LAUNCH_CUDA_KERNEL((EigenMetaKernel_Vectorizable<TensorEvaluator<Expression, GpuDevice>, Index>), num_blocks, block_size, 0, device, evaluator, size);
}
evaluator.cleanup();
+#else
+ eigen_assert(false && "Cannot launch a kernel from another kernel");
+#endif
}
#endif // __CUDACC__
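
Note on the pattern used above: EIGEN_DEVICE_FUNC marks run() as callable from both host and device code (under nvcc it expands to __host__ __device__), while the #ifndef __CUDA_ARCH__ guard compiles the kernel-launch body only in the host pass; the device pass sees just the eigen_assert, which is what silences nvcc's warnings about calling host-only code from a device-callable function. Below is a minimal, self-contained sketch of the same idiom outside of Eigen; the names fill_kernel and run_fill are hypothetical and chosen only for illustration.

// Standalone sketch (not Eigen code) of the host/device split applied in this patch:
// the wrapper is compiled for both host and device, but the kernel launch only
// exists in the host pass; the device pass reduces to an assert.
#include <cassert>
#include <cstdio>

__global__ void fill_kernel(float* data, int size, float value) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < size) data[i] = value;
}

__host__ __device__ inline void run_fill(float* data, int size, float value) {
#ifndef __CUDA_ARCH__
  const int block_size = 256;
  const int num_blocks = (size + block_size - 1) / block_size;
  fill_kernel<<<num_blocks, block_size>>>(data, size, value);
  cudaDeviceSynchronize();
#else
  // Mirrors the patch: a kernel cannot be launched from device code here.
  assert(false && "Cannot launch a kernel from another kernel");
#endif
}

int main() {
  const int size = 1024;
  float* data = NULL;
  cudaMalloc(&data, size * sizeof(float));
  run_fill(data, size, 3.0f);  // host path: launches the kernel
  float first = 0.0f;
  cudaMemcpy(&first, data, sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("data[0] = %f\n", first);
  cudaFree(data);
  return 0;
}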