diff options
author | Mehdi Goli <mehdi.goli@codeplay.com> | 2016-11-08 17:08:02 +0000 |
---|---|---|
committer | Mehdi Goli <mehdi.goli@codeplay.com> | 2016-11-08 17:08:02 +0000 |
commit | d57430dd73ab2f88aa5e45c370f6ab91103ff18a (patch) | |
tree | d3d46d788686c38b1da1cb696807d51334829e5a /unsupported/test | |
parent | dad177be010b45ba42425ab04af6dde6c479453b (diff) |
Converting all sycl buffers to uninitialised device only buffers; adding memcpyHostToDevice and memcpyDeviceToHost on syclDevice; modifying all examples to obey the new rules; moving sycl queue creation to the device based on Benoit's suggestion; removing the sycl-specific condition for returning m_result in TensorReduction.h according to Benoit's suggestion.
Diffstat (limited to 'unsupported/test')
-rw-r--r-- | unsupported/test/cxx11_tensor_broadcast_sycl.cpp | 79 | ||||
-rw-r--r-- | unsupported/test/cxx11_tensor_device_sycl.cpp | 20 | ||||
-rw-r--r-- | unsupported/test/cxx11_tensor_forced_eval_sycl.cpp | 44 | ||||
-rw-r--r-- | unsupported/test/cxx11_tensor_reduction_sycl.cpp | 147 | ||||
-rw-r--r-- | unsupported/test/cxx11_tensor_sycl.cpp | 67 |
5 files changed, 167 insertions, 190 deletions
diff --git a/unsupported/test/cxx11_tensor_broadcast_sycl.cpp b/unsupported/test/cxx11_tensor_broadcast_sycl.cpp index ecebf7d68..7201bfe37 100644 --- a/unsupported/test/cxx11_tensor_broadcast_sycl.cpp +++ b/unsupported/test/cxx11_tensor_broadcast_sycl.cpp @@ -25,55 +25,50 @@ using Eigen::SyclDevice; using Eigen::Tensor; using Eigen::TensorMap; -// Types used in tests: -using TestTensor = Tensor<float, 3>; -using TestTensorMap = TensorMap<Tensor<float, 3>>; -static void test_broadcast_sycl(){ +static void test_broadcast_sycl(const Eigen::SyclDevice &sycl_device){ - cl::sycl::gpu_selector s; - cl::sycl::queue q(s, [=](cl::sycl::exception_list l) { - for (const auto& e : l) { - try { - std::rethrow_exception(e); - } catch (cl::sycl::exception e) { - std::cout << e.what() << std::endl; - } - } - }); - SyclDevice sycl_device(q); - // BROADCAST test: - array<int, 4> in_range = {{2, 3, 5, 7}}; - array<int, in_range.size()> broadcasts = {{2, 3, 1, 4}}; - array<int, in_range.size()> out_range; // = in_range * broadcasts - for (size_t i = 0; i < out_range.size(); ++i) - out_range[i] = in_range[i] * broadcasts[i]; + // BROADCAST test: + array<int, 4> in_range = {{2, 3, 5, 7}}; + array<int, 4> broadcasts = {{2, 3, 1, 4}}; + array<int, 4> out_range; // = in_range * broadcasts + for (size_t i = 0; i < out_range.size(); ++i) + out_range[i] = in_range[i] * broadcasts[i]; + + Tensor<float, 4> input(in_range); + Tensor<float, 4> out(out_range); - Tensor<float, in_range.size()> input(in_range); - Tensor<float, out_range.size()> output(out_range); + for (size_t i = 0; i < in_range.size(); ++i) + VERIFY_IS_EQUAL(out.dimension(i), out_range[i]); - for (int i = 0; i < input.size(); ++i) - input(i) = static_cast<float>(i); - TensorMap<decltype(input)> gpu_in(input.data(), in_range); - TensorMap<decltype(output)> gpu_out(output.data(), out_range); - gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts); - sycl_device.deallocate(output.data()); + for (int i = 0; i < input.size(); ++i) 
+ input(i) = static_cast<float>(i); - for (size_t i = 0; i < in_range.size(); ++i) - VERIFY_IS_EQUAL(output.dimension(i), out_range[i]); + float * gpu_in_data = static_cast<float*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(float))); + float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float))); - for (int i = 0; i < 4; ++i) { - for (int j = 0; j < 9; ++j) { - for (int k = 0; k < 5; ++k) { - for (int l = 0; l < 28; ++l) { - VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), output(i,j,k,l)); - } - } - } - } - printf("Broadcast Test Passed\n"); + TensorMap<Tensor<float, 4>> gpu_in(gpu_in_data, in_range); + TensorMap<Tensor<float, 4>> gpu_out(gpu_out_data, out_range); + sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(float)); + gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); + + for (int i = 0; i < 4; ++i) { + for (int j = 0; j < 9; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 28; ++l) { + VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), out(i,j,k,l)); + } + } + } + } + printf("Broadcast Test Passed\n"); + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); } void test_cxx11_tensor_broadcast_sycl() { - CALL_SUBTEST(test_broadcast_sycl()); + cl::sycl::gpu_selector s; + Eigen::SyclDevice sycl_device(s); + CALL_SUBTEST(test_broadcast_sycl(sycl_device)); } diff --git a/unsupported/test/cxx11_tensor_device_sycl.cpp b/unsupported/test/cxx11_tensor_device_sycl.cpp index f54fc8786..7f79753c5 100644 --- a/unsupported/test/cxx11_tensor_device_sycl.cpp +++ b/unsupported/test/cxx11_tensor_device_sycl.cpp @@ -20,20 +20,12 @@ #include "main.h" #include <unsupported/Eigen/CXX11/Tensor> -void test_device_sycl() { - cl::sycl::gpu_selector s; - cl::sycl::queue q(s, [=](cl::sycl::exception_list l) { - for (const auto& e : l) { - try { - 
std::rethrow_exception(e); - } catch (cl::sycl::exception e) { - std::cout << e.what() << std::endl; - } - } - }); - Eigen::SyclDevice sycl_device(q); - printf("Helo from ComputeCpp: Device Exists\n"); +void test_device_sycl(const Eigen::SyclDevice &sycl_device) { + std::cout <<"Helo from ComputeCpp: the requested device exists and the device name is : " + << sycl_device.m_queue.get_device(). template get_info<cl::sycl::info::device::name>() <<std::endl;; } void test_cxx11_tensor_device_sycl() { - CALL_SUBTEST(test_device_sycl()); + cl::sycl::gpu_selector s; + Eigen::SyclDevice sycl_device(s); + CALL_SUBTEST(test_device_sycl(sycl_device)); } diff --git a/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp b/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp index 182ec7fa8..5690da723 100644 --- a/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp +++ b/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp @@ -22,18 +22,7 @@ using Eigen::Tensor; -void test_forced_eval_sycl() { - cl::sycl::gpu_selector s; - cl::sycl::queue q(s, [=](cl::sycl::exception_list l) { - for (const auto& e : l) { - try { - std::rethrow_exception(e); - } catch (cl::sycl::exception e) { - std::cout << e.what() << std::endl; - } - } - }); - SyclDevice sycl_device(q); +void test_forced_eval_sycl(const Eigen::SyclDevice &sycl_device) { int sizeDim1 = 100; int sizeDim2 = 200; @@ -43,17 +32,22 @@ void test_forced_eval_sycl() { Eigen::Tensor<float, 3> in2(tensorRange); Eigen::Tensor<float, 3> out(tensorRange); + float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float))); + float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float))); + float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float))); + in1 = in1.random() + in1.constant(10.0f); in2 = in2.random() + in2.constant(10.0f); - // creating TensorMap from tensor - Eigen::TensorMap<Eigen::Tensor<float, 3>> 
gpu_in1(in1.data(), tensorRange); - Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in2(in2.data(), tensorRange); - Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_out(out.data(), tensorRange); - + // creating TensorMap from tensor + Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange); + Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange); + Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_out(gpu_out_data, tensorRange); + sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(float)); + sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(float)); /// c=(a+b)*b - gpu_out.device(sycl_device) =(gpu_in1 + gpu_in2).eval() * gpu_in2; - sycl_device.deallocate(out.data()); + gpu_out.device(sycl_device) =(gpu_in1 + gpu_in2).eval() * gpu_in2; + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -62,7 +56,15 @@ void test_forced_eval_sycl() { } } } - printf("(a+b)*b Test Passed\n"); + printf("(a+b)*b Test Passed\n"); + sycl_device.deallocate(gpu_in1_data); + sycl_device.deallocate(gpu_in2_data); + sycl_device.deallocate(gpu_out_data); + } -void test_cxx11_tensor_forced_eval_sycl() { CALL_SUBTEST(test_forced_eval_sycl()); } +void test_cxx11_tensor_forced_eval_sycl() { + cl::sycl::gpu_selector s; + Eigen::SyclDevice sycl_device(s); + CALL_SUBTEST(test_forced_eval_sycl(sycl_device)); +} diff --git a/unsupported/test/cxx11_tensor_reduction_sycl.cpp b/unsupported/test/cxx11_tensor_reduction_sycl.cpp index bd09744a6..a9ef82907 100644 --- a/unsupported/test/cxx11_tensor_reduction_sycl.cpp +++ b/unsupported/test/cxx11_tensor_reduction_sycl.cpp @@ -22,126 +22,117 @@ -static void test_full_reductions_sycl() { - - - cl::sycl::gpu_selector s; - cl::sycl::queue q(s, [=](cl::sycl::exception_list l) { - 
for (const auto& e : l) { - try { - std::rethrow_exception(e); - } catch (cl::sycl::exception e) { - std::cout << e.what() << std::endl; - } - } - }); - Eigen::SyclDevice sycl_device(q); +static void test_full_reductions_sycl(const Eigen::SyclDevice& sycl_device) { const int num_rows = 452; const int num_cols = 765; array<int, 2> tensorRange = {{num_rows, num_cols}}; Tensor<float, 2> in(tensorRange); + Tensor<float, 0> full_redux; + Tensor<float, 0> full_redux_gpu; + in.setRandom(); - Tensor<float, 0> full_redux; - Tensor<float, 0> full_redux_g; full_redux = in.sum(); - float* out_data = (float*)sycl_device.allocate(sizeof(float)); - TensorMap<Tensor<float, 2> > in_gpu(in.data(), tensorRange); - TensorMap<Tensor<float, 0> > full_redux_gpu(out_data); - full_redux_gpu.device(sycl_device) = in_gpu.sum(); - sycl_device.deallocate(out_data); - // Check that the CPU and GPU reductions return the same result. - VERIFY_IS_APPROX(full_redux_gpu(), full_redux()); -} + float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float))); + float* gpu_out_data =(float*)sycl_device.allocate(sizeof(float)); + TensorMap<Tensor<float, 2> > in_gpu(gpu_in_data, tensorRange); + TensorMap<Tensor<float, 0> > out_gpu(gpu_out_data); -static void test_first_dim_reductions_sycl() { + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float)); + out_gpu.device(sycl_device) = in_gpu.sum(); + sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, sizeof(float)); + // Check that the CPU and GPU reductions return the same result. 
+ VERIFY_IS_APPROX(full_redux_gpu(), full_redux()); + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} - cl::sycl::gpu_selector s; - cl::sycl::queue q(s, [=](cl::sycl::exception_list l) { - for (const auto& e : l) { - try { - std::rethrow_exception(e); - } catch (cl::sycl::exception e) { - std::cout << e.what() << std::endl; - } - } - }); - Eigen::SyclDevice sycl_device(q); +static void test_first_dim_reductions_sycl(const Eigen::SyclDevice& sycl_device) { int dim_x = 145; int dim_y = 1; int dim_z = 67; array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}}; - - Tensor<float, 3> in(tensorRange); - in.setRandom(); Eigen::array<int, 1> red_axis; red_axis[0] = 0; - Tensor<float, 2> redux = in.sum(red_axis); array<int, 2> reduced_tensorRange = {{dim_y, dim_z}}; - Tensor<float, 2> redux_g(reduced_tensorRange); - TensorMap<Tensor<float, 3> > in_gpu(in.data(), tensorRange); - float* out_data = (float*)sycl_device.allocate(dim_y*dim_z*sizeof(float)); - TensorMap<Tensor<float, 2> > redux_gpu(out_data, dim_y, dim_z ); - redux_gpu.device(sycl_device) = in_gpu.sum(red_axis); - sycl_device.deallocate(out_data); - // Check that the CPU and GPU reductions return the same result. 
- for(int j=0; j<dim_y; j++ ) - for(int k=0; k<dim_z; k++ ) - VERIFY_IS_APPROX(redux_gpu(j,k), redux(j,k)); -} + Tensor<float, 3> in(tensorRange); + Tensor<float, 2> redux(reduced_tensorRange); + Tensor<float, 2> redux_gpu(reduced_tensorRange); + + in.setRandom(); + redux= in.sum(red_axis); -static void test_last_dim_reductions_sycl() { + float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float))); + float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float))); + TensorMap<Tensor<float, 3> > in_gpu(gpu_in_data, tensorRange); + TensorMap<Tensor<float, 2> > out_gpu(gpu_out_data, reduced_tensorRange); - cl::sycl::gpu_selector s; - cl::sycl::queue q(s, [=](cl::sycl::exception_list l) { - for (const auto& e : l) { - try { - std::rethrow_exception(e); - } catch (cl::sycl::exception e) { - std::cout << e.what() << std::endl; - } - } - }); - Eigen::SyclDevice sycl_device(q); + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float)); + out_gpu.device(sycl_device) = in_gpu.sum(red_axis); + sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float)); + + // Check that the CPU and GPU reductions return the same result. 
+ for(int j=0; j<reduced_tensorRange[0]; j++ ) + for(int k=0; k<reduced_tensorRange[1]; k++ ) + VERIFY_IS_APPROX(redux_gpu(j,k), redux(j,k)); + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +static void test_last_dim_reductions_sycl(const Eigen::SyclDevice &sycl_device) { int dim_x = 567; int dim_y = 1; int dim_z = 47; array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}}; - - Tensor<float, 3> in(tensorRange); - in.setRandom(); Eigen::array<int, 1> red_axis; red_axis[0] = 2; - Tensor<float, 2> redux = in.sum(red_axis); array<int, 2> reduced_tensorRange = {{dim_x, dim_y}}; - Tensor<float, 2> redux_g(reduced_tensorRange); - TensorMap<Tensor<float, 3> > in_gpu(in.data(), tensorRange); - float* out_data = (float*)sycl_device.allocate(dim_x*dim_y*sizeof(float)); - TensorMap<Tensor<float, 2> > redux_gpu(out_data, dim_x, dim_y ); - redux_gpu.device(sycl_device) = in_gpu.sum(red_axis); - sycl_device.deallocate(out_data); + Tensor<float, 3> in(tensorRange); + Tensor<float, 2> redux(reduced_tensorRange); + Tensor<float, 2> redux_gpu(reduced_tensorRange); + + in.setRandom(); + + redux= in.sum(red_axis); + + float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float))); + float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float))); + + TensorMap<Tensor<float, 3> > in_gpu(gpu_in_data, tensorRange); + TensorMap<Tensor<float, 2> > out_gpu(gpu_out_data, reduced_tensorRange); + + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float)); + out_gpu.device(sycl_device) = in_gpu.sum(red_axis); + sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float)); // Check that the CPU and GPU reductions return the same result. 
- for(int j=0; j<dim_x; j++ ) - for(int k=0; k<dim_y; k++ ) + for(int j=0; j<reduced_tensorRange[0]; j++ ) + for(int k=0; k<reduced_tensorRange[1]; k++ ) VERIFY_IS_APPROX(redux_gpu(j,k), redux(j,k)); + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); + } void test_cxx11_tensor_reduction_sycl() { - CALL_SUBTEST((test_full_reductions_sycl())); - CALL_SUBTEST((test_first_dim_reductions_sycl())); - CALL_SUBTEST((test_last_dim_reductions_sycl())); + cl::sycl::gpu_selector s; + Eigen::SyclDevice sycl_device(s); + CALL_SUBTEST((test_full_reductions_sycl(sycl_device))); + CALL_SUBTEST((test_first_dim_reductions_sycl(sycl_device))); + CALL_SUBTEST((test_last_dim_reductions_sycl(sycl_device))); } diff --git a/unsupported/test/cxx11_tensor_sycl.cpp b/unsupported/test/cxx11_tensor_sycl.cpp index 0f66cd8f0..6a9c33422 100644 --- a/unsupported/test/cxx11_tensor_sycl.cpp +++ b/unsupported/test/cxx11_tensor_sycl.cpp @@ -27,42 +27,33 @@ using Eigen::SyclDevice; using Eigen::Tensor; using Eigen::TensorMap; -// Types used in tests: -using TestTensor = Tensor<float, 3>; -using TestTensorMap = TensorMap<Tensor<float, 3>>; - -void test_sycl_cpu() { - cl::sycl::gpu_selector s; - cl::sycl::queue q(s, [=](cl::sycl::exception_list l) { - for (const auto& e : l) { - try { - std::rethrow_exception(e); - } catch (cl::sycl::exception e) { - std::cout << e.what() << std::endl; - } - } - }); - SyclDevice sycl_device(q); +void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) { int sizeDim1 = 100; int sizeDim2 = 100; int sizeDim3 = 100; array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}}; - TestTensor in1(tensorRange); - TestTensor in2(tensorRange); - TestTensor in3(tensorRange); - TestTensor out(tensorRange); - in1 = in1.random(); + Tensor<float, 3> in1(tensorRange); + Tensor<float, 3> in2(tensorRange); + Tensor<float, 3> in3(tensorRange); + Tensor<float, 3> out(tensorRange); + in2 = in2.random(); in3 = in3.random(); - TestTensorMap gpu_in1(in1.data(), 
tensorRange); - TestTensorMap gpu_in2(in2.data(), tensorRange); - TestTensorMap gpu_in3(in3.data(), tensorRange); - TestTensorMap gpu_out(out.data(), tensorRange); + + float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float))); + float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float))); + float * gpu_in3_data = static_cast<float*>(sycl_device.allocate(in3.dimensions().TotalSize()*sizeof(float))); + float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float))); + + TensorMap<Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange); + TensorMap<Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange); + TensorMap<Tensor<float, 3>> gpu_in3(gpu_in3_data, tensorRange); + TensorMap<Tensor<float, 3>> gpu_out(gpu_out_data, tensorRange); /// a=1.2f gpu_in1.device(sycl_device) = gpu_in1.constant(1.2f); - sycl_device.deallocate(in1.data()); + sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -74,7 +65,7 @@ void test_sycl_cpu() { /// a=b*1.2f gpu_out.device(sycl_device) = gpu_in1 * 1.2f; - sycl_device.deallocate(out.data()); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -86,8 +77,9 @@ void test_sycl_cpu() { printf("a=b*1.2f Test Passed\n"); /// c=a*b + sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.dimensions().TotalSize())*sizeof(float)); gpu_out.device(sycl_device) = gpu_in1 * gpu_in2; - sycl_device.deallocate(out.data()); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < 
sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -101,7 +93,7 @@ void test_sycl_cpu() { /// c=a+b gpu_out.device(sycl_device) = gpu_in1 + gpu_in2; - sycl_device.deallocate(out.data()); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -115,7 +107,7 @@ void test_sycl_cpu() { /// c=a*a gpu_out.device(sycl_device) = gpu_in1 * gpu_in1; - sycl_device.deallocate(out.data()); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -125,12 +117,11 @@ void test_sycl_cpu() { } } } - printf("c= a*a Test Passed\n"); //a*3.14f + b*2.7f gpu_out.device(sycl_device) = gpu_in1 * gpu_in1.constant(3.14f) + gpu_in2 * gpu_in2.constant(2.7f); - sycl_device.deallocate(out.data()); + sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -143,8 +134,9 @@ void test_sycl_cpu() { printf("a*3.14f + b*2.7f Test Passed\n"); ///d= (a>0.5? b:c) + sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.dimensions().TotalSize())*sizeof(float)); gpu_out.device(sycl_device) =(gpu_in1 > gpu_in1.constant(0.5f)).select(gpu_in2, gpu_in3); - sycl_device.deallocate(out.data()); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -155,8 +147,13 @@ void test_sycl_cpu() { } } printf("d= (a>0.5? 
b:c) Test Passed\n"); - + sycl_device.deallocate(gpu_in1_data); + sycl_device.deallocate(gpu_in2_data); + sycl_device.deallocate(gpu_in3_data); + sycl_device.deallocate(gpu_out_data); } void test_cxx11_tensor_sycl() { - CALL_SUBTEST(test_sycl_cpu()); + cl::sycl::gpu_selector s; + Eigen::SyclDevice sycl_device(s); + CALL_SUBTEST(test_sycl_cpu(sycl_device)); } |