From bab29936a1cf0a68ffe4ccb1fd9b4807a3ec87ae Mon Sep 17 00:00:00 2001
From: Mehdi Goli
Date: Wed, 1 Feb 2017 15:29:53 +0000
Subject: Reducing warnings in Sycl backend.

---
 unsupported/test/cxx11_tensor_reverse_sycl.cpp | 112 ++++++++++++-------------
 1 file changed, 56 insertions(+), 56 deletions(-)

diff --git a/unsupported/test/cxx11_tensor_reverse_sycl.cpp b/unsupported/test/cxx11_tensor_reverse_sycl.cpp
index 73b394c18..2f5484484 100644
--- a/unsupported/test/cxx11_tensor_reverse_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_reverse_sycl.cpp
@@ -14,24 +14,24 @@
 #define EIGEN_TEST_NO_LONGDOUBLE
 #define EIGEN_TEST_NO_COMPLEX
 #define EIGEN_TEST_FUNC cxx11_tensor_reverse_sycl
-#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
 #define EIGEN_USE_SYCL
 
 #include "main.h"
 #include <unsupported/Eigen/CXX11/Tensor>
 
 
-template <typename DataType, typename DataLayout>
+template <typename DataType, typename DataLayout, typename IndexType>
 static void test_simple_reverse(const Eigen::SyclDevice& sycl_device) {
 
-  int dim1 = 2;
-  int dim2 = 3;
-  int dim3 = 5;
-  int dim4 = 7;
+  IndexType dim1 = 2;
+  IndexType dim2 = 3;
+  IndexType dim3 = 5;
+  IndexType dim4 = 7;
 
-  array<int, 4> tensorRange = {{dim1, dim2, dim3, dim4}};
-  Tensor<DataType, 4, DataLayout> tensor(tensorRange);
-  Tensor<DataType, 4, DataLayout> reversed_tensor(tensorRange);
+  array<IndexType, 4> tensorRange = {{dim1, dim2, dim3, dim4}};
+  Tensor<DataType, 4, DataLayout, IndexType> tensor(tensorRange);
+  Tensor<DataType, 4, DataLayout, IndexType> reversed_tensor(tensorRange);
   tensor.setRandom();
 
   array<bool, 4> dim_rev;
@@ -43,17 +43,17 @@ static void test_simple_reverse(const Eigen::SyclDevice& sycl_device) {
   DataType* gpu_in_data = static_cast<DataType*>(sycl_device.allocate(tensor.dimensions().TotalSize()*sizeof(DataType)));
   DataType* gpu_out_data =static_cast<DataType*>(sycl_device.allocate(reversed_tensor.dimensions().TotalSize()*sizeof(DataType)));
 
-  TensorMap<Tensor<DataType, 4, DataLayout> >  in_gpu(gpu_in_data, tensorRange);
-  TensorMap<Tensor<DataType, 4, DataLayout> >  out_gpu(gpu_out_data, tensorRange);
+  TensorMap<Tensor<DataType, 4, DataLayout, IndexType> >  in_gpu(gpu_in_data, tensorRange);
+  TensorMap<Tensor<DataType, 4, DataLayout, IndexType> >  out_gpu(gpu_out_data, tensorRange);
 
   sycl_device.memcpyHostToDevice(gpu_in_data, tensor.data(),(tensor.dimensions().TotalSize())*sizeof(DataType));
 
   out_gpu.device(sycl_device) = in_gpu.reverse(dim_rev);
   sycl_device.memcpyDeviceToHost(reversed_tensor.data(), gpu_out_data, reversed_tensor.dimensions().TotalSize()*sizeof(DataType));
   // Check that the CPU and GPU reductions return the same result.
-  for (int i = 0; i < 2; ++i) {
-    for (int j = 0; j < 3; ++j) {
-      for (int k = 0; k < 5; ++k) {
-        for (int l = 0; l < 7; ++l) {
+  for (IndexType i = 0; i < 2; ++i) {
+    for (IndexType j = 0; j < 3; ++j) {
+      for (IndexType k = 0; k < 5; ++k) {
+        for (IndexType l = 0; l < 7; ++l) {
           VERIFY_IS_EQUAL(tensor(i,j,k,l), reversed_tensor(i,2-j,4-k,l));
         }
       }
@@ -67,10 +67,10 @@ static void test_simple_reverse(const Eigen::SyclDevice& sycl_device) {
   out_gpu.device(sycl_device) = in_gpu.reverse(dim_rev);
   sycl_device.memcpyDeviceToHost(reversed_tensor.data(), gpu_out_data, reversed_tensor.dimensions().TotalSize()*sizeof(DataType));
 
-  for (int i = 0; i < 2; ++i) {
-    for (int j = 0; j < 3; ++j) {
-      for (int k = 0; k < 5; ++k) {
-        for (int l = 0; l < 7; ++l) {
+  for (IndexType i = 0; i < 2; ++i) {
+    for (IndexType j = 0; j < 3; ++j) {
+      for (IndexType k = 0; k < 5; ++k) {
+        for (IndexType l = 0; l < 7; ++l) {
           VERIFY_IS_EQUAL(tensor(i,j,k,l), reversed_tensor(1-i,j,k,l));
         }
       }
@@ -84,10 +84,10 @@ static void test_simple_reverse(const Eigen::SyclDevice& sycl_device) {
   out_gpu.device(sycl_device) = in_gpu.reverse(dim_rev);
   sycl_device.memcpyDeviceToHost(reversed_tensor.data(), gpu_out_data, reversed_tensor.dimensions().TotalSize()*sizeof(DataType));
 
-  for (int i = 0; i < 2; ++i) {
-    for (int j = 0; j < 3; ++j) {
-      for (int k = 0; k < 5; ++k) {
-        for (int l = 0; l < 7; ++l) {
+  for (IndexType i = 0; i < 2; ++i) {
+    for (IndexType j = 0; j < 3; ++j) {
+      for (IndexType k = 0; k < 5; ++k) {
+        for (IndexType l = 0; l < 7; ++l) {
           VERIFY_IS_EQUAL(tensor(i,j,k,l), reversed_tensor(1-i,j,k,6-l));
         }
       }
@@ -100,18 +100,18 @@
 
 
 
-template <typename DataType, typename DataLayout>
+template <typename DataType, typename DataLayout, typename IndexType>
 static void test_expr_reverse(const Eigen::SyclDevice& sycl_device, bool LValue)
 {
-  int dim1 = 2;
-  int dim2 = 3;
-  int dim3 = 5;
-  int dim4 = 7;
-
-  array<int, 4> tensorRange = {{dim1, dim2, dim3, dim4}};
-  Tensor<DataType, 4, DataLayout> tensor(tensorRange);
-  Tensor<DataType, 4, DataLayout> expected(tensorRange);
-  Tensor<DataType, 4, DataLayout> result(tensorRange);
+  IndexType dim1 = 2;
+  IndexType dim2 = 3;
+  IndexType dim3 = 5;
+  IndexType dim4 = 7;
+
+  array<IndexType, 4> tensorRange = {{dim1, dim2, dim3, dim4}};
+  Tensor<DataType, 4, DataLayout, IndexType> tensor(tensorRange);
+  Tensor<DataType, 4, DataLayout, IndexType> expected(tensorRange);
+  Tensor<DataType, 4, DataLayout, IndexType> result(tensorRange);
   tensor.setRandom();
 
   array<bool, 4> dim_rev;
@@ -124,9 +124,9 @@ static void test_expr_reverse(const Eigen::SyclDevice& sycl_device, bool LValue
   DataType* gpu_out_data_expected =static_cast<DataType*>(sycl_device.allocate(expected.dimensions().TotalSize()*sizeof(DataType)));
   DataType* gpu_out_data_result =static_cast<DataType*>(sycl_device.allocate(result.dimensions().TotalSize()*sizeof(DataType)));
 
-  TensorMap<Tensor<DataType, 4, DataLayout> >  in_gpu(gpu_in_data, tensorRange);
-  TensorMap<Tensor<DataType, 4, DataLayout> >  out_gpu_expected(gpu_out_data_expected, tensorRange);
-  TensorMap<Tensor<DataType, 4, DataLayout> >  out_gpu_result(gpu_out_data_result, tensorRange);
+  TensorMap<Tensor<DataType, 4, DataLayout, IndexType> >  in_gpu(gpu_in_data, tensorRange);
+  TensorMap<Tensor<DataType, 4, DataLayout, IndexType> >  out_gpu_expected(gpu_out_data_expected, tensorRange);
+  TensorMap<Tensor<DataType, 4, DataLayout, IndexType> >  out_gpu_result(gpu_out_data_result, tensorRange);
 
   sycl_device.memcpyHostToDevice(gpu_in_data, tensor.data(),(tensor.dimensions().TotalSize())*sizeof(DataType));
 
@@ -139,20 +139,20 @@ static void test_expr_reverse(const Eigen::SyclDevice& sycl_device, bool LValue
   sycl_device.memcpyDeviceToHost(expected.data(), gpu_out_data_expected, expected.dimensions().TotalSize()*sizeof(DataType));
 
 
-  array<int, 4> src_slice_dim;
+  array<IndexType, 4> src_slice_dim;
   src_slice_dim[0] = 2;
   src_slice_dim[1] = 3;
   src_slice_dim[2] = 1;
   src_slice_dim[3] = 7;
-  array<int, 4> src_slice_start;
+  array<IndexType, 4> src_slice_start;
   src_slice_start[0] = 0;
   src_slice_start[1] = 0;
   src_slice_start[2] = 0;
   src_slice_start[3] = 0;
-  array<int, 4> dst_slice_dim = src_slice_dim;
-  array<int, 4> dst_slice_start = src_slice_start;
+  array<IndexType, 4> dst_slice_dim = src_slice_dim;
+  array<IndexType, 4> dst_slice_start = src_slice_start;
 
-  for (int i = 0; i < 5; ++i) {
+  for (IndexType i = 0; i < 5; ++i) {
     if (LValue) {
       out_gpu_result.slice(dst_slice_start, dst_slice_dim).reverse(dim_rev).device(sycl_device) =
           in_gpu.slice(src_slice_start, src_slice_dim);
@@ -165,10 +165,10 @@ static void test_expr_reverse(const Eigen::SyclDevice& sycl_device, bool LValue
   }
   sycl_device.memcpyDeviceToHost(result.data(), gpu_out_data_result, result.dimensions().TotalSize()*sizeof(DataType));
 
-  for (int i = 0; i < expected.dimension(0); ++i) {
-    for (int j = 0; j < expected.dimension(1); ++j) {
-      for (int k = 0; k < expected.dimension(2); ++k) {
-        for (int l = 0; l < expected.dimension(3); ++l) {
+  for (IndexType i = 0; i < expected.dimension(0); ++i) {
+    for (IndexType j = 0; j < expected.dimension(1); ++j) {
+      for (IndexType k = 0; k < expected.dimension(2); ++k) {
+        for (IndexType l = 0; l < expected.dimension(3); ++l) {
           VERIFY_IS_EQUAL(result(i,j,k,l), expected(i,j,k,l));
         }
       }
@@ -178,7 +178,7 @@ static void test_expr_reverse(const Eigen::SyclDevice& sycl_device, bool LValue
   dst_slice_start[2] = 0;
   result.setRandom();
   sycl_device.memcpyHostToDevice(gpu_out_data_result, result.data(),(result.dimensions().TotalSize())*sizeof(DataType));
-  for (int i = 0; i < 5; ++i) {
+  for (IndexType i = 0; i < 5; ++i) {
     if (LValue) {
       out_gpu_result.slice(dst_slice_start, dst_slice_dim).reverse(dim_rev).device(sycl_device) =
           in_gpu.slice(dst_slice_start, dst_slice_dim);
@@ -190,10 +190,10 @@ static void test_expr_reverse(const Eigen::SyclDevice& sycl_device, bool LValue
   }
   sycl_device.memcpyDeviceToHost(result.data(), gpu_out_data_result, result.dimensions().TotalSize()*sizeof(DataType));
 
-  for (int i = 0; i < expected.dimension(0); ++i) {
-    for (int j = 0; j < expected.dimension(1); ++j) {
-      for (int k = 0; k < expected.dimension(2); ++k) {
-        for (int l = 0; l < expected.dimension(3); ++l) {
+  for (IndexType i = 0; i < expected.dimension(0); ++i) {
+    for (IndexType j = 0; j < expected.dimension(1); ++j) {
+      for (IndexType k = 0; k < expected.dimension(2); ++k) {
+        for (IndexType l = 0; l < expected.dimension(3); ++l) {
           VERIFY_IS_EQUAL(result(i,j,k,l), expected(i,j,k,l));
         }
       }
@@ -207,12 +207,12 @@ template <typename DataType> void sycl_reverse_test_per_device(const cl::sycl::de
   std::cout << "Running on " << d.template get_info<cl::sycl::info::device::name>() << std::endl;
   QueueInterface queueInterface(d);
   auto sycl_device = Eigen::SyclDevice(&queueInterface);
-  test_simple_reverse<DataType, RowMajor>(sycl_device);
-  test_simple_reverse<DataType, ColMajor>(sycl_device);
-  test_expr_reverse<DataType, RowMajor>(sycl_device, false);
-  test_expr_reverse<DataType, ColMajor>(sycl_device, false);
-  test_expr_reverse<DataType, RowMajor>(sycl_device, true);
-  test_expr_reverse<DataType, ColMajor>(sycl_device, true);
+  test_simple_reverse<DataType, RowMajor, int64_t>(sycl_device);
+  test_simple_reverse<DataType, ColMajor, int64_t>(sycl_device);
+  test_expr_reverse<DataType, RowMajor, int64_t>(sycl_device, false);
+  test_expr_reverse<DataType, ColMajor, int64_t>(sycl_device, false);
+  test_expr_reverse<DataType, RowMajor, int64_t>(sycl_device, true);
+  test_expr_reverse<DataType, ColMajor, int64_t>(sycl_device, true);
 }
 void test_cxx11_tensor_reverse_sycl() {
   for (const auto& device :Eigen::get_sycl_supported_devices()) {
-- 
cgit v1.2.3
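
For context: the patch templates the tests on an IndexType and switches EIGEN_DEFAULT_DENSE_INDEX_TYPE to int64_t, so loop counters, ranges, and slice arrays use the same 64-bit index type that the Tensor API works with. The sketch below, which is not part of the patch, illustrates the kind of index-width mismatch such changes avoid; dimension() is a hypothetical stand-in for a call like expected.dimension(3), and the compiler flags shown are only one way to surface the warning.

// sketch.cpp -- illustrates the int vs. 64-bit index mismatch.
// Build with, e.g.:  g++ -std=c++11 -Wconversion sketch.cpp
#include <cstdint>

// Stand-in for an Eigen call that returns the dense index type
// (int64_t once EIGEN_DEFAULT_DENSE_INDEX_TYPE is int64_t).
static std::int64_t dimension() { return 7; }

int main() {
  // Old pattern: a 64-bit index narrowed into an int; -Wconversion reports
  // that the conversion may change the value -- the sort of warning noise
  // the patch removes from the SYCL tests.
  int narrowed = dimension();

  // New pattern: counters and extents share one index type, mirroring the
  // IndexType template parameter introduced by the patch.
  using IndexType = std::int64_t;
  for (IndexType l = 0; l < dimension(); ++l) {
    narrowed += static_cast<int>(l);  // explicit cast where narrowing is intended
  }
  return narrowed;
}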