From 949a2da38cbfebe358a25dc59b47abb67beb4126 Mon Sep 17 00:00:00 2001
From: RJ Ryan <rryan@google.com>
Date: Fri, 14 Apr 2017 13:23:35 -0700
Subject: Use scalar_sum_op and scalar_quotient_op instead of operator+ and
 operator/ in MeanReducer.

Improves support for std::complex types when compiling for CUDA.

Expands on e2e9cdd16970914cf0a892fea5e7c4402b3ede41 and
2bda1b0d93fb627d0c500ec48b20302d44c32cb7 .
---
 .../Eigen/CXX11/src/Tensor/TensorFunctors.h   |  8 +++--
 unsupported/test/cxx11_tensor_complex_cuda.cu | 36 ++++++++++++++++++++++
 2 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
index 3b4f8eda1..5dcc3794c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
@@ -166,7 +166,8 @@ template <typename T> struct MeanReducer
     return pset1<Packet>(initialize());
   }
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
-    return accum / scalarCount_;
+    internal::scalar_quotient_op<T> quotient_op;
+    return quotient_op(accum, T(scalarCount_));
   }
   template <typename Packet>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
@@ -175,7 +176,10 @@ template <typename T> struct MeanReducer
   template <typename Packet>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
     internal::scalar_sum_op<T> sum_op;
-    return sum_op(saccum, predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits<Packet>::size);
+    internal::scalar_quotient_op<T> quotient_op;
+    return quotient_op(
+        sum_op(saccum, predux(vaccum)),
+        T(scalarCount_ + packetCount_ * unpacket_traits<Packet>::size));
   }
 
   protected:
diff --git a/unsupported/test/cxx11_tensor_complex_cuda.cu b/unsupported/test/cxx11_tensor_complex_cuda.cu
index d4e111f5d..87cf28920 100644
--- a/unsupported/test/cxx11_tensor_complex_cuda.cu
+++ b/unsupported/test/cxx11_tensor_complex_cuda.cu
@@ -107,6 +107,41 @@ static void test_cuda_sum_reductions() {
   gpu_device.deallocate(gpu_out_ptr);
 }
 
+static void test_cuda_mean_reductions() {
+
+  Eigen::CudaStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+
+  const int num_rows = internal::random<int>(1024, 5*1024);
+  const int num_cols = internal::random<int>(1024, 5*1024);
+
+  Tensor<std::complex<float>, 2> in(num_rows, num_cols);
+  in.setRandom();
+
+  Tensor<std::complex<float>, 0> full_redux;
+  full_redux = in.mean();
+
+  std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
+  std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
+  std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
+  std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
+  gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
+
+  TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
+  TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);
+
+  out_gpu.device(gpu_device) = in_gpu.mean();
+
+  Tensor<std::complex<float>, 0> full_redux_gpu;
+  gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
+  gpu_device.synchronize();
+
+  // Check that the CPU and GPU reductions return the same result.
+  VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
+
+  gpu_device.deallocate(gpu_in_ptr);
+  gpu_device.deallocate(gpu_out_ptr);
+}
 
 static void test_cuda_product_reductions() {
 
@@ -149,5 +184,6 @@ void test_cxx11_tensor_complex()
 {
   CALL_SUBTEST(test_cuda_nullary());
   CALL_SUBTEST(test_cuda_sum_reductions());
+  CALL_SUBTEST(test_cuda_mean_reductions());
   CALL_SUBTEST(test_cuda_product_reductions());
 }
-- 
cgit v1.2.3