diff options
author | Benoit Steiner <benoit.steiner.goog@gmail.com> | 2016-06-29 15:32:47 -0700 |
---|---|---|
committer | Benoit Steiner <benoit.steiner.goog@gmail.com> | 2016-06-29 15:32:47 -0700 |
commit | b2a47641ce0ad0642d93db0030cbf8cd0bb7f2c0 (patch) | |
tree | fd1717e971f39dd47f650cb856585fc330c26182 /unsupported/Eigen/CXX11/src/Tensor | |
parent | b047ca765f033b276036b209560cfb0d32a23155 (diff) |
Made the code compile when using CUDA architecture < 300
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor')
-rw-r--r-- | unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h | 8 |
1 file changed, 8 insertions, 0 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
index d9bbcd858..5e512490c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
@@ -117,6 +117,7 @@ template <int BlockSize, int NumPerThread, typename Self, typename Reducer, typename Index>
 __global__ void FullReductionKernel(Reducer reducer, const Self input, Index num_coeffs,
                                     typename Self::CoeffReturnType* output, unsigned int* semaphore) {
+#if __CUDA_ARCH__ >= 300
   // Initialize the output value
   const Index first_index = blockIdx.x * BlockSize * NumPerThread + threadIdx.x;
   if (gridDim.x == 1) {
@@ -171,6 +172,9 @@ __global__ void FullReductionKernel(Reducer reducer, const Self input, Index num
     // Let the last block reset the semaphore
     atomicInc(semaphore, gridDim.x + 1);
   }
+#else
+  assert(0 && "Shouldn't be called on unsupported device");
+#endif
 }


@@ -355,6 +359,7 @@ template <int NumPerThread, typename Self, typename Reducer, typename Index>
 __global__ void InnerReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce,
                                      Index num_preserved_coeffs,
                                      typename Self::CoeffReturnType* output) {
+#if __CUDA_ARCH__ >= 300
   eigen_assert(blockDim.y == 1);
   eigen_assert(blockDim.z == 1);
   eigen_assert(gridDim.y == 1);
@@ -414,6 +419,9 @@ __global__ void InnerReductionKernel(Reducer reducer, const Self input, Index nu
       }
     }
   }
+#else
+  assert(0 && "Shouldn't be called on unsupported device");
+#endif
 }

 #ifdef EIGEN_HAS_CUDA_FP16