path: root/unsupported/test/cxx11_tensor_device_sycl.cpp
author Benoit Steiner <benoit.steiner.goog@gmail.com> 2016-11-18 16:58:09 -0800
committer Benoit Steiner <benoit.steiner.goog@gmail.com> 2016-11-18 16:58:09 -0800
commit a357fe1fb9b053c57af62f76f150a70314f06e92 (patch)
tree ae9606f3412ab1da099d8590ef6bcc6055faae1b /unsupported/test/cxx11_tensor_device_sycl.cpp
parent 1c6eafb46b8b3a0fb5dd583ed546588c8869a6be (diff)
Code cleanup
Diffstat (limited to 'unsupported/test/cxx11_tensor_device_sycl.cpp')
-rw-r--r-- unsupported/test/cxx11_tensor_device_sycl.cpp | 15
1 file changed, 9 insertions, 6 deletions
diff --git a/unsupported/test/cxx11_tensor_device_sycl.cpp b/unsupported/test/cxx11_tensor_device_sycl.cpp
index a41fd37c2..9e13d2f1b 100644
--- a/unsupported/test/cxx11_tensor_device_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_device_sycl.cpp
@@ -31,10 +31,10 @@ void test_device_memory(const Eigen::SyclDevice &sycl_device) {
array<int, 1> tensorRange = {{sizeDim1}};
Tensor<DataType, 1, DataLayout> in(tensorRange);
Tensor<DataType, 1, DataLayout> in1(tensorRange);
- memset(in1.data(), 1,in1.size()*sizeof(DataType));
- DataType * gpu_in_data = static_cast<DataType*>(sycl_device.allocate(in.size()*sizeof(DataType)));
- sycl_device.memset(gpu_in_data, 1,in.size()*sizeof(DataType) );
- sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.size()*sizeof(DataType) );
+ memset(in1.data(), 1, in1.size() * sizeof(DataType));
+ DataType* gpu_in_data = static_cast<DataType*>(sycl_device.allocate(in.size()*sizeof(DataType)));
+ sycl_device.memset(gpu_in_data, 1, in.size()*sizeof(DataType));
+ sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.size()*sizeof(DataType));
for (int i=0; i<in.size(); i++) {
VERIFY_IS_EQUAL(in(i), in1(i));
}
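
The hunk above is whitespace cleanup only. For reference, a minimal standalone sketch of the round-trip it exercises, kept close to the test body: the function name and the chosen buffer size are placeholders, assert stands in for the harness's VERIFY_IS_EQUAL, and the SyclDevice is assumed to be constructed by the caller.

#define EIGEN_USE_SYCL
#include <cassert>
#include <cstring>
#include <unsupported/Eigen/CXX11/Tensor>

// Byte-fill a buffer on the host and on the device, copy the device buffer
// back to the host, and check that both buffers ended up identical.
template <typename DataType, int DataLayout>
void device_memory_roundtrip(const Eigen::SyclDevice& sycl_device) {
  int sizeDim1 = 100;  // arbitrary size for this sketch
  Eigen::array<int, 1> tensorRange = {{sizeDim1}};
  Eigen::Tensor<DataType, 1, DataLayout> in(tensorRange);   // filled via the device
  Eigen::Tensor<DataType, 1, DataLayout> in1(tensorRange);  // host-side reference

  std::memset(in1.data(), 1, in1.size() * sizeof(DataType));
  DataType* gpu_in_data =
      static_cast<DataType*>(sycl_device.allocate(in.size() * sizeof(DataType)));
  sycl_device.memset(gpu_in_data, 1, in.size() * sizeof(DataType));
  sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.size() * sizeof(DataType));

  for (int i = 0; i < in.size(); i++) {
    assert(in(i) == in1(i));  // same byte pattern on host and device
  }
  sycl_device.deallocate(gpu_in_data);
}
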
@@ -47,10 +47,13 @@ void test_device_exceptions(const Eigen::SyclDevice &sycl_device) {
int sizeDim1 = 100;
array<int, 1> tensorDims = {{sizeDim1}};
DataType* gpu_data = static_cast<DataType*>(sycl_device.allocate(sizeDim1*sizeof(DataType)));
- TensorMap<Tensor<DataType, 1,DataLayout>> in(gpu_data, tensorDims);
- TensorMap<Tensor<DataType, 1,DataLayout>> out(gpu_data, tensorDims);
+ sycl_device.memset(gpu_data, 1, sizeDim1*sizeof(DataType));
+ TensorMap<Tensor<DataType, 1, DataLayout>> in(gpu_data, tensorDims);
+ TensorMap<Tensor<DataType, 1, DataLayout>> out(gpu_data, tensorDims);
out.device(sycl_device) = in / in.constant(0);
+
+ sycl_device.synchronize();
VERIFY(!sycl_device.ok());
sycl_device.deallocate(gpu_data);
}
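
The substantive change is in test_device_exceptions: the device buffer is now initialized with sycl_device.memset before it is read, and sycl_device.synchronize() is called before checking sycl_device.ok(), since the division by zero is executed asynchronously and its error state is only reliably visible once the device has finished the submitted work. A minimal sketch of that pattern follows; the function name is a placeholder, assert stands in for the harness's VERIFY macro, and the SyclDevice is again assumed to be constructed by the caller.

#define EIGEN_USE_SYCL
#include <cassert>
#include <unsupported/Eigen/CXX11/Tensor>

// Submit an expression that divides by zero, then synchronize before asking
// the device whether an asynchronous error was recorded.
template <typename DataType, int DataLayout>
void device_reports_async_error(const Eigen::SyclDevice& sycl_device) {
  int sizeDim1 = 100;
  Eigen::array<int, 1> tensorDims = {{sizeDim1}};
  DataType* gpu_data =
      static_cast<DataType*>(sycl_device.allocate(sizeDim1 * sizeof(DataType)));
  sycl_device.memset(gpu_data, 1, sizeDim1 * sizeof(DataType));  // initialize before use

  Eigen::TensorMap<Eigen::Tensor<DataType, 1, DataLayout>> in(gpu_data, tensorDims);
  Eigen::TensorMap<Eigen::Tensor<DataType, 1, DataLayout>> out(gpu_data, tensorDims);

  out.device(sycl_device) = in / in.constant(0);  // enqueued asynchronously

  sycl_device.synchronize();  // wait so the failure becomes observable
  assert(!sycl_device.ok());  // the device should now report the error

  sycl_device.deallocate(gpu_data);
}
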