// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_USE_GPU

#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>

using Eigen::Tensor;

void test_cuda_nullary() {
  Tensor<std::complex<float>, 1, 0, int> in1(2);
  Tensor<std::complex<float>, 1, 0, int> in2(2);
  in1.setRandom();
  in2.setRandom();

  std::size_t float_bytes = in1.size() * sizeof(float);
  std::size_t complex_bytes = in1.size() * sizeof(std::complex<float>);

  std::complex<float>* d_in1;
  std::complex<float>* d_in2;
  float* d_out2;
  cudaMalloc((void**)(&d_in1), complex_bytes);
  cudaMalloc((void**)(&d_in2), complex_bytes);
  cudaMalloc((void**)(&d_out2), float_bytes);
  cudaMemcpy(d_in1, in1.data(), complex_bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(d_in2, in2.data(), complex_bytes, cudaMemcpyHostToDevice);

  Eigen::GpuStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);

  Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in1(
      d_in1, 2);
  Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in2(
      d_in2, 2);
  Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_out2(
      d_out2, 2);

  // Fill in1 with a complex constant and compute the modulus of in2, both on the device.
  gpu_in1.device(gpu_device) = gpu_in1.constant(std::complex<float>(3.14f, 2.7f));
  gpu_out2.device(gpu_device) = gpu_in2.abs();

  Tensor<std::complex<float>, 1, 0, int> new1(2);
  Tensor<float, 1, 0, int> new2(2);

  assert(cudaMemcpyAsync(new1.data(), d_in1, complex_bytes, cudaMemcpyDeviceToHost,
                         gpu_device.stream()) == cudaSuccess);
  assert(cudaMemcpyAsync(new2.data(), d_out2, float_bytes, cudaMemcpyDeviceToHost,
                         gpu_device.stream()) == cudaSuccess);

  assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);

  for (int i = 0; i < 2; ++i) {
    VERIFY_IS_APPROX(new1(i), std::complex<float>(3.14f, 2.7f));
    VERIFY_IS_APPROX(new2(i), std::abs(in2(i)));
  }

  cudaFree(d_in1);
  cudaFree(d_in2);
  cudaFree(d_out2);
}

static void test_cuda_sum_reductions() {
  Eigen::GpuStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);

  const int num_rows = internal::random<int>(1024, 5*1024);
  const int num_cols = internal::random<int>(1024, 5*1024);

  Tensor<std::complex<float>, 2> in(num_rows, num_cols);
  in.setRandom();

  Tensor<std::complex<float>, 0> full_redux;
  full_redux = in.sum();

  std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
  std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
  std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
  std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
  gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);

  TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
  TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);

  out_gpu.device(gpu_device) = in_gpu.sum();

  Tensor<std::complex<float>, 0> full_redux_gpu;
  gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
  gpu_device.synchronize();

  // Check that the CPU and GPU reductions return the same result.
  VERIFY_IS_APPROX(full_redux(), full_redux_gpu());

  gpu_device.deallocate(gpu_in_ptr);
  gpu_device.deallocate(gpu_out_ptr);
}

static void test_cuda_mean_reductions() {
  Eigen::GpuStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);

  const int num_rows = internal::random<int>(1024, 5*1024);
  const int num_cols = internal::random<int>(1024, 5*1024);

  Tensor<std::complex<float>, 2> in(num_rows, num_cols);
  in.setRandom();

  Tensor<std::complex<float>, 0> full_redux;
  full_redux = in.mean();

  std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
  std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
  std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
  std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
  gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);

  TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
  TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);

  out_gpu.device(gpu_device) = in_gpu.mean();

  Tensor<std::complex<float>, 0> full_redux_gpu;
  gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
  gpu_device.synchronize();

  // Check that the CPU and GPU reductions return the same result.
  VERIFY_IS_APPROX(full_redux(), full_redux_gpu());

  gpu_device.deallocate(gpu_in_ptr);
  gpu_device.deallocate(gpu_out_ptr);
}

static void test_cuda_product_reductions() {
  Eigen::GpuStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);

  const int num_rows = internal::random<int>(1024, 5*1024);
  const int num_cols = internal::random<int>(1024, 5*1024);

  Tensor<std::complex<float>, 2> in(num_rows, num_cols);
  in.setRandom();

  Tensor<std::complex<float>, 0> full_redux;
  full_redux = in.prod();

  std::size_t in_bytes = in.size() * sizeof(std::complex<float>);
  std::size_t out_bytes = full_redux.size() * sizeof(std::complex<float>);
  std::complex<float>* gpu_in_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(in_bytes));
  std::complex<float>* gpu_out_ptr = static_cast<std::complex<float>*>(gpu_device.allocate(out_bytes));
  gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);

  TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
  TensorMap<Tensor<std::complex<float>, 0> > out_gpu(gpu_out_ptr);

  out_gpu.device(gpu_device) = in_gpu.prod();

  Tensor<std::complex<float>, 0> full_redux_gpu;
  gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
  gpu_device.synchronize();

  // Check that the CPU and GPU reductions return the same result.
  VERIFY_IS_APPROX(full_redux(), full_redux_gpu());

  gpu_device.deallocate(gpu_in_ptr);
  gpu_device.deallocate(gpu_out_ptr);
}

EIGEN_DECLARE_TEST(test_cxx11_tensor_complex) {
  CALL_SUBTEST(test_cuda_nullary());
  CALL_SUBTEST(test_cuda_sum_reductions());
  CALL_SUBTEST(test_cuda_mean_reductions());
  CALL_SUBTEST(test_cuda_product_reductions());
}