diff options
author | Benoit Steiner <benoit.steiner.goog@gmail.com> | 2016-04-19 14:55:21 -0700 |
---|---|---|
committer | Benoit Steiner <benoit.steiner.goog@gmail.com> | 2016-04-19 14:55:21 -0700 |
commit | 7129d998db0a8dd74125ad7081f3d220cbce96f0 (patch) | |
tree | 9e63ab4761eee9bb7168796939d486e95c62480a | |
parent | b9ea40c30d1d32d0f31b047aa681c384fd1a2c98 (diff) |
Simplified the code that launches CUDA kernels.
-rw-r--r-- | unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h | 9 | ||||
-rw-r--r-- | unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h | 2 | ||||
-rw-r--r-- | unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h | 12 |
3 files changed, 7 insertions, 16 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h index 8e7f5dddb..1d2d162dc 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h @@ -291,18 +291,9 @@ struct GpuDevice { int max_blocks_; }; -#if !defined(__CUDA_ARCH__) #define LAUNCH_CUDA_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \ (kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \ assert(cudaGetLastError() == cudaSuccess); -#elif __CUDA_ARCH__ >= 350 -#define LAUNCH_CUDA_KERNEL(kernel, ...) \ - { const auto __attribute__((__unused__)) __makeTheKernelInstantiate = &(kernel); } \ - eigen_assert(false && "Cannot launch a kernel from another kernel" __CUDA_ARCH__ kernel); -#else -#define LAUNCH_CUDA_KERNEL(kernel, ...) \ - eigen_assert(false && "Cannot launch a kernel from another kernel" __CUDA_ARCH__ kernel); -#endif // FIXME: Should be device and kernel specific. 
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h index 907da9446..bf6e10a7b 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h @@ -193,7 +193,7 @@ struct EigenMetaKernelEval { template <typename Evaluator, typename Index> struct EigenMetaKernelEval<Evaluator, Index, true> { static __device__ EIGEN_ALWAYS_INLINE - void run(Evaluator eval, Index first, Index last, Index step_size) { + void run(Evaluator& eval, Index first, Index last, Index step_size) { const Index PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size; const Index vectorized_size = (last / PacketSize) * PacketSize; const Index vectorized_step_size = step_size * PacketSize; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h index 02193f263..d80436326 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h @@ -126,11 +126,11 @@ struct FullReducer<Self, Op, GpuDevice, Vectorizable> { internal::is_same<typename Self::CoeffReturnType, float>::value; template <typename OutputType> - static EIGEN_DEVICE_FUNC void run(const Self&, Op&, const GpuDevice&, OutputType*) { + static void run(const Self&, Op&, const GpuDevice&, OutputType*) { assert(false && "Should only be called on floats"); } - static EIGEN_DEVICE_FUNC void run(const Self& self, Op& reducer, const GpuDevice& device, float* output) { + static void run(const Self& self, Op& reducer, const GpuDevice& device, float* output) { typedef typename Self::Index Index; const Index num_coeffs = array_prod(self.m_impl.dimensions()); @@ -226,12 +226,12 @@ struct InnerReducer<Self, Op, GpuDevice> { internal::is_same<typename Self::CoeffReturnType, float>::value; template <typename Device, typename OutputType> - static EIGEN_DEVICE_FUNC bool run(const 
Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) { + static bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) { assert(false && "Should only be called to reduce floats on a gpu device"); return true; } - static EIGEN_DEVICE_FUNC bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) { + static bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) { typedef typename Self::Index Index; // It's faster to use the usual code. @@ -305,12 +305,12 @@ struct OuterReducer<Self, Op, GpuDevice> { internal::is_same<typename Self::CoeffReturnType, float>::value; template <typename Device, typename OutputType> - static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) { + static bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) { assert(false && "Should only be called to reduce floats on a gpu device"); return true; } - static EIGEN_DEVICE_FUNC bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) { + static bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) { typedef typename Self::Index Index; // It's faster to use the usual code. |