From 00f32752f7d0b193c6788691c3cf0b76457a044d Mon Sep 17 00:00:00 2001 From: Mehdi Goli Date: Thu, 28 Nov 2019 10:08:54 +0000 Subject: [SYCL] Rebasing the SYCL support branch on top of the Eigen upstream master branch. * Unifying all loadLocalTile from lhs and rhs to an extract_block function. * Adding get_tensor operation which was missing in TensorContractionMapper. * Adding the -D method missing from cmake for Disable_Skinny Contraction operation. * Wrapping all the indices in TensorScanSycl into Scan parameter struct. * Fixing typo in Device SYCL * Unifying load to private register for tall/skinny no shared * Unifying load to vector tile for tensor-vector/vector-tensor operation * Removing all the LHS/RHS class for extracting data from global * Removing Outputfunction from TensorContractionSkinnyNoshared. * Combining the local memory version of tall/skinny and normal tensor contraction into one kernel. * Combining the no-local memory version of tall/skinny and normal tensor contraction into one kernel. * Combining General Tensor-Vector and VectorTensor contraction into one kernel. * Making double buffering optional for Tensor contraction when local memory version is used. 
* Modifying benchmark to accept custom Reduction Sizes * Disabling AVX optimization for SYCL backend on the host to allow SSE optimization to the host * Adding Test for SYCL * Modifying SYCL CMake --- unsupported/test/cxx11_tensor_argmax_sycl.cpp | 136 ++++++++++++++------------ 1 file changed, 74 insertions(+), 62 deletions(-) (limited to 'unsupported/test/cxx11_tensor_argmax_sycl.cpp') diff --git a/unsupported/test/cxx11_tensor_argmax_sycl.cpp b/unsupported/test/cxx11_tensor_argmax_sycl.cpp index 0bbb0f6dc..41ea3cf7b 100644 --- a/unsupported/test/cxx11_tensor_argmax_sycl.cpp +++ b/unsupported/test/cxx11_tensor_argmax_sycl.cpp @@ -18,6 +18,7 @@ #define EIGEN_USE_SYCL #include "main.h" + #include using Eigen::array; @@ -26,9 +27,8 @@ using Eigen::Tensor; using Eigen::TensorMap; template -static void test_sycl_simple_argmax(const Eigen::SyclDevice &sycl_device){ - - Tensor in(Eigen::array{{2,2,2}}); +static void test_sycl_simple_argmax(const Eigen::SyclDevice& sycl_device) { + Tensor in(Eigen::array{{2, 2, 2}}); Tensor out_max; Tensor out_min; in.setRandom(); @@ -39,14 +39,15 @@ static void test_sycl_simple_argmax(const Eigen::SyclDevice &sycl_device){ std::size_t in_bytes = in.size() * sizeof(DataType); std::size_t out_bytes = out_max.size() * sizeof(DenseIndex); - DataType * d_in = static_cast(sycl_device.allocate(in_bytes)); + DataType* d_in = static_cast(sycl_device.allocate(in_bytes)); DenseIndex* d_out_max = static_cast(sycl_device.allocate(out_bytes)); DenseIndex* d_out_min = static_cast(sycl_device.allocate(out_bytes)); - Eigen::TensorMap > gpu_in(d_in, Eigen::array{{2,2,2}}); + Eigen::TensorMap > gpu_in(d_in, + Eigen::array{{2, 2, 2}}); Eigen::TensorMap > gpu_out_max(d_out_max); Eigen::TensorMap > gpu_out_min(d_out_min); - sycl_device.memcpyHostToDevice(d_in, in.data(),in_bytes); + sycl_device.memcpyHostToDevice(d_in, in.data(), in_bytes); gpu_out_max.device(sycl_device) = gpu_in.argmax(); gpu_out_min.device(sycl_device) = gpu_in.argmin(); @@ -54,7 +55,7 @@ 
static void test_sycl_simple_argmax(const Eigen::SyclDevice &sycl_device){ sycl_device.memcpyDeviceToHost(out_max.data(), d_out_max, out_bytes); sycl_device.memcpyDeviceToHost(out_min.data(), d_out_min, out_bytes); - VERIFY_IS_EQUAL(out_max(), 2*2*2 - 1); + VERIFY_IS_EQUAL(out_max(), 2 * 2 * 2 - 1); VERIFY_IS_EQUAL(out_min(), 0); sycl_device.deallocate(d_in); @@ -62,22 +63,22 @@ static void test_sycl_simple_argmax(const Eigen::SyclDevice &sycl_device){ sycl_device.deallocate(d_out_min); } - template -static void test_sycl_argmax_dim(const Eigen::SyclDevice &sycl_device) -{ - DenseIndex sizeDim0=9; - DenseIndex sizeDim1=3; - DenseIndex sizeDim2=5; - DenseIndex sizeDim3=7; - Tensor tensor(sizeDim0,sizeDim1,sizeDim2,sizeDim3); +static void test_sycl_argmax_dim(const Eigen::SyclDevice& sycl_device) { + DenseIndex sizeDim0 = 9; + DenseIndex sizeDim1 = 3; + DenseIndex sizeDim2 = 5; + DenseIndex sizeDim3 = 7; + Tensor tensor(sizeDim0, sizeDim1, sizeDim2, sizeDim3); std::vector dims; - dims.push_back(sizeDim0); dims.push_back(sizeDim1); dims.push_back(sizeDim2); dims.push_back(sizeDim3); + dims.push_back(sizeDim0); + dims.push_back(sizeDim1); + dims.push_back(sizeDim2); + dims.push_back(sizeDim3); for (DenseIndex dim = 0; dim < 4; ++dim) { - array out_shape; - for (DenseIndex d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d+1]; + for (DenseIndex d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? 
dims[d] : dims[d + 1]; Tensor tensor_arg(out_shape); @@ -86,9 +87,13 @@ static void test_sycl_argmax_dim(const Eigen::SyclDevice &sycl_device) for (DenseIndex j = 0; j < sizeDim1; ++j) { for (DenseIndex k = 0; k < sizeDim2; ++k) { for (DenseIndex l = 0; l < sizeDim3; ++l) { - ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l; - // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = 10.0 - tensor(ix)=(ix[dim] != 0)?-1.0:10.0; + ix[0] = i; + ix[1] = j; + ix[2] = k; + ix[3] = l; + // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) + // = 10.0 + tensor(ix) = (ix[dim] != 0) ? -1.0 : 10.0; } } } @@ -97,23 +102,23 @@ static void test_sycl_argmax_dim(const Eigen::SyclDevice &sycl_device) std::size_t in_bytes = tensor.size() * sizeof(DataType); std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex); + DataType* d_in = static_cast(sycl_device.allocate(in_bytes)); + DenseIndex* d_out = static_cast(sycl_device.allocate(out_bytes)); - DataType * d_in = static_cast(sycl_device.allocate(in_bytes)); - DenseIndex* d_out= static_cast(sycl_device.allocate(out_bytes)); - - Eigen::TensorMap > gpu_in(d_in, Eigen::array{{sizeDim0,sizeDim1,sizeDim2,sizeDim3}}); + Eigen::TensorMap > gpu_in( + d_in, Eigen::array{{sizeDim0, sizeDim1, sizeDim2, sizeDim3}}); Eigen::TensorMap > gpu_out(d_out, out_shape); - sycl_device.memcpyHostToDevice(d_in, tensor.data(),in_bytes); + sycl_device.memcpyHostToDevice(d_in, tensor.data(), in_bytes); gpu_out.device(sycl_device) = gpu_in.argmax(dim); sycl_device.memcpyDeviceToHost(tensor_arg.data(), d_out, out_bytes); VERIFY_IS_EQUAL(static_cast(tensor_arg.size()), - size_t(sizeDim0*sizeDim1*sizeDim2*sizeDim3 / tensor.dimension(dim))); + size_t(sizeDim0 * sizeDim1 * sizeDim2 * sizeDim3 / tensor.dimension(dim))); for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { // Expect max to be in the first index of the reduced dimension - VERIFY_IS_EQUAL(tensor_arg.data()[n], 0); + VERIFY_IS_EQUAL(tensor_arg.data()[n], 0); } 
sycl_device.synchronize(); @@ -122,15 +127,18 @@ static void test_sycl_argmax_dim(const Eigen::SyclDevice &sycl_device) for (DenseIndex j = 0; j < sizeDim1; ++j) { for (DenseIndex k = 0; k < sizeDim2; ++k) { for (DenseIndex l = 0; l < sizeDim3; ++l) { - ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l; + ix[0] = i; + ix[1] = j; + ix[2] = k; + ix[3] = l; // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = 20.0 - tensor(ix)=(ix[dim] != tensor.dimension(dim) - 1)?-1.0:20.0; + tensor(ix) = (ix[dim] != tensor.dimension(dim) - 1) ? -1.0 : 20.0; } } } } - sycl_device.memcpyHostToDevice(d_in, tensor.data(),in_bytes); + sycl_device.memcpyHostToDevice(d_in, tensor.data(), in_bytes); gpu_out.device(sycl_device) = gpu_in.argmax(dim); sycl_device.memcpyDeviceToHost(tensor_arg.data(), d_out, out_bytes); @@ -144,20 +152,21 @@ static void test_sycl_argmax_dim(const Eigen::SyclDevice &sycl_device) } template -static void test_sycl_argmin_dim(const Eigen::SyclDevice &sycl_device) -{ - DenseIndex sizeDim0=9; - DenseIndex sizeDim1=3; - DenseIndex sizeDim2=5; - DenseIndex sizeDim3=7; - Tensor tensor(sizeDim0,sizeDim1,sizeDim2,sizeDim3); +static void test_sycl_argmin_dim(const Eigen::SyclDevice& sycl_device) { + DenseIndex sizeDim0 = 9; + DenseIndex sizeDim1 = 3; + DenseIndex sizeDim2 = 5; + DenseIndex sizeDim3 = 7; + Tensor tensor(sizeDim0, sizeDim1, sizeDim2, sizeDim3); std::vector dims; - dims.push_back(sizeDim0); dims.push_back(sizeDim1); dims.push_back(sizeDim2); dims.push_back(sizeDim3); + dims.push_back(sizeDim0); + dims.push_back(sizeDim1); + dims.push_back(sizeDim2); + dims.push_back(sizeDim3); for (DenseIndex dim = 0; dim < 4; ++dim) { - array out_shape; - for (DenseIndex d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d+1]; + for (DenseIndex d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? 
dims[d] : dims[d + 1]; Tensor tensor_arg(out_shape); @@ -166,9 +175,12 @@ static void test_sycl_argmin_dim(const Eigen::SyclDevice &sycl_device) for (DenseIndex j = 0; j < sizeDim1; ++j) { for (DenseIndex k = 0; k < sizeDim2; ++k) { for (DenseIndex l = 0; l < sizeDim3; ++l) { - ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l; - // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = 10.0 - tensor(ix)=(ix[dim] != 0)?1.0:-10.0; + ix[0] = i; + ix[1] = j; + ix[2] = k; + ix[3] = l; + // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = -10.0 + tensor(ix) = (ix[dim] != 0) ? 1.0 : -10.0; } } } @@ -177,23 +189,23 @@ static void test_sycl_argmin_dim(const Eigen::SyclDevice &sycl_device) std::size_t in_bytes = tensor.size() * sizeof(DataType); std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex); + DataType* d_in = static_cast(sycl_device.allocate(in_bytes)); + DenseIndex* d_out = static_cast(sycl_device.allocate(out_bytes)); - DataType * d_in = static_cast(sycl_device.allocate(in_bytes)); - DenseIndex* d_out= static_cast(sycl_device.allocate(out_bytes)); - - Eigen::TensorMap > gpu_in(d_in, Eigen::array{{sizeDim0,sizeDim1,sizeDim2,sizeDim3}}); + Eigen::TensorMap > gpu_in( + d_in, Eigen::array{{sizeDim0, sizeDim1, sizeDim2, sizeDim3}}); Eigen::TensorMap > gpu_out(d_out, out_shape); - sycl_device.memcpyHostToDevice(d_in, tensor.data(),in_bytes); + sycl_device.memcpyHostToDevice(d_in, tensor.data(), in_bytes); gpu_out.device(sycl_device) = gpu_in.argmin(dim); sycl_device.memcpyDeviceToHost(tensor_arg.data(), d_out, out_bytes); VERIFY_IS_EQUAL(static_cast(tensor_arg.size()), - size_t(sizeDim0*sizeDim1*sizeDim2*sizeDim3 / tensor.dimension(dim))); + size_t(sizeDim0 * sizeDim1 * sizeDim2 * sizeDim3 / tensor.dimension(dim))); for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { // Expect max to be in the first index of the reduced dimension - VERIFY_IS_EQUAL(tensor_arg.data()[n], 0); + VERIFY_IS_EQUAL(tensor_arg.data()[n], 0); } 
sycl_device.synchronize(); @@ -202,15 +214,18 @@ static void test_sycl_argmin_dim(const Eigen::SyclDevice &sycl_device) for (DenseIndex j = 0; j < sizeDim1; ++j) { for (DenseIndex k = 0; k < sizeDim2; ++k) { for (DenseIndex l = 0; l < sizeDim3; ++l) { - ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l; - // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = 20.0 - tensor(ix)=(ix[dim] != tensor.dimension(dim) - 1)?1.0:-20.0; + ix[0] = i; + ix[1] = j; + ix[2] = k; + ix[3] = l; + // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = -20.0 + tensor(ix) = (ix[dim] != tensor.dimension(dim) - 1) ? 1.0 : -20.0; } } } } - sycl_device.memcpyHostToDevice(d_in, tensor.data(),in_bytes); + sycl_device.memcpyHostToDevice(d_in, tensor.data(), in_bytes); gpu_out.device(sycl_device) = gpu_in.argmin(dim); sycl_device.memcpyDeviceToHost(tensor_arg.data(), d_out, out_bytes); @@ -223,10 +238,8 @@ static void test_sycl_argmin_dim(const Eigen::SyclDevice &sycl_device) } } - - - -template void sycl_argmax_test_per_device(const Device_Selector& d){ +template +void sycl_argmax_test_per_device(const Device_Selector& d) { QueueInterface queueInterface(d); auto sycl_device = Eigen::SyclDevice(&queueInterface); test_sycl_simple_argmax(sycl_device); @@ -238,8 +251,7 @@ template void sycl_argmax_test_per_ } EIGEN_DECLARE_TEST(cxx11_tensor_argmax_sycl) { - for (const auto& device :Eigen::get_sycl_supported_devices()) { - CALL_SUBTEST(sycl_argmax_test_per_device(device)); + for (const auto& device : Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_argmax_test_per_device(device)); } - } -- cgit v1.2.3