From 00f32752f7d0b193c6788691c3cf0b76457a044d Mon Sep 17 00:00:00 2001
From: Mehdi Goli <mehdi.goli@codeplay.com>
Date: Thu, 28 Nov 2019 10:08:54 +0000
Subject: [SYCL] Rebasing the SYCL support branch on top of the Eigen upstream
 master branch.

* Unifying all loadLocalTile from lhs and rhs to an extract_block function.
* Adding the get_tensor operation which was missing in TensorContractionMapper.
* Adding the -D definition missing from cmake for the Disable_Skinny contraction option.
* Wrapping all the indices in TensorScanSycl into a Scan parameter struct.
* Fixing a typo in Device SYCL.
* Unifying load to private register for the tall/skinny no-shared-memory path.
* Unifying load to vector tile for the tensor-vector/vector-tensor operation.
* Removing all the LHS/RHS classes for extracting data from global memory.
* Removing Outputfunction from TensorContractionSkinnyNoshared.
* Combining the local-memory version of tall/skinny and normal tensor contraction into one kernel.
* Combining the no-local-memory version of tall/skinny and normal tensor contraction into one kernel.
* Combining general tensor-vector and vector-tensor contraction into one kernel.
* Making double buffering optional for tensor contraction when the local-memory version is used.
* Modifying the benchmark to accept custom reduction sizes.
* Disabling AVX optimization for the SYCL backend on the host to allow SSE optimization on the host.
* Adding tests for SYCL.
* Modifying SYCL CMake.
---
 unsupported/test/cxx11_tensor_math_sycl.cpp | 105 ++++++++++++++++++++++++++++
 1 file changed, 105 insertions(+)
 create mode 100644 unsupported/test/cxx11_tensor_math_sycl.cpp

diff --git a/unsupported/test/cxx11_tensor_math_sycl.cpp b/unsupported/test/cxx11_tensor_math_sycl.cpp
new file mode 100644
index 000000000..029653e27
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_math_sycl.cpp
@@ -0,0 +1,105 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli    Codeplay Software Ltd.
+// Ralph Potter  Codeplay Software Ltd.
+// Luke Iwanski  Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+// Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::array;
+using Eigen::SyclDevice;
+using Eigen::Tensor;
+using Eigen::TensorMap;
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+template <typename DataType, int DataLayout, typename IndexType>
+static void test_tanh_sycl(const Eigen::SyclDevice &sycl_device)
+{
+
+  IndexType sizeDim1 = 4;
+  IndexType sizeDim2 = 4;
+  IndexType sizeDim3 = 1;
+  array<IndexType, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
+  Tensor<DataType, 3, DataLayout, IndexType> in(tensorRange);
+  Tensor<DataType, 3, DataLayout, IndexType> out(tensorRange);
+  Tensor<DataType, 3, DataLayout, IndexType> out_cpu(tensorRange);
+
+  in = in.random();
+
+  DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(in.size()*sizeof(DataType)));
+  DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(out.size()*sizeof(DataType)));
+
+  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu1(gpu_data1, tensorRange);
+  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu2(gpu_data2, tensorRange);
+
+  sycl_device.memcpyHostToDevice(gpu_data1, in.data(),(in.size())*sizeof(DataType));
+  gpu2.device(sycl_device) = gpu1.tanh();
+  sycl_device.memcpyDeviceToHost(out.data(), gpu_data2,(out.size())*sizeof(DataType));
+
+  out_cpu=in.tanh();
+
+  for (int i = 0; i < in.size(); ++i) {
+    VERIFY_IS_APPROX(out(i), out_cpu(i));
+  }
+}
+template <typename DataType, int DataLayout, typename IndexType>
+static void test_sigmoid_sycl(const Eigen::SyclDevice &sycl_device)
+{
+
+  IndexType sizeDim1 = 4;
+  IndexType sizeDim2 = 4;
+  IndexType sizeDim3 = 1;
+  array<IndexType, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
+  Tensor<DataType, 3, DataLayout, IndexType> in(tensorRange);
+  Tensor<DataType, 3, DataLayout, IndexType> out(tensorRange);
+  Tensor<DataType, 3, DataLayout, IndexType> out_cpu(tensorRange);
+
+  in = in.random();
+
+  DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(in.size()*sizeof(DataType)));
+  DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(out.size()*sizeof(DataType)));
+
+  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu1(gpu_data1, tensorRange);
+  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu2(gpu_data2, tensorRange);
+
+  sycl_device.memcpyHostToDevice(gpu_data1, in.data(),(in.size())*sizeof(DataType));
+  gpu2.device(sycl_device) = gpu1.sigmoid();
+  sycl_device.memcpyDeviceToHost(out.data(), gpu_data2,(out.size())*sizeof(DataType));
+
+  out_cpu=in.sigmoid();
+
+  for (int i = 0; i < in.size(); ++i) {
+    VERIFY_IS_APPROX(out(i), out_cpu(i));
+  }
+}
+
+
+template<typename DataType, typename dev_Selector> void sycl_computing_test_per_device(dev_Selector s){
+  QueueInterface queueInterface(s);
+  auto sycl_device = Eigen::SyclDevice(&queueInterface);
+  test_tanh_sycl<DataType, RowMajor, int64_t>(sycl_device);
+  test_tanh_sycl<DataType, ColMajor, int64_t>(sycl_device);
+  test_sigmoid_sycl<DataType, RowMajor, int64_t>(sycl_device);
+  test_sigmoid_sycl<DataType, ColMajor, int64_t>(sycl_device);
+}
+
+EIGEN_DECLARE_TEST(cxx11_tensor_math_sycl) {
+  for (const auto& device :Eigen::get_sycl_supported_devices()) {
+    CALL_SUBTEST(sycl_computing_test_per_device<float>(device));
+  }
+}
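
Note: the pattern exercised by this test is the general host-to-device flow of the SYCL backend: allocate device buffers through the SyclDevice, wrap them in a TensorMap, evaluate the expression with .device(sycl_device), and copy the result back for comparison against a CPU evaluation. The snippet below is a minimal standalone sketch of that flow outside the test harness, built only from the calls that appear in the test above; it assumes an Eigen build with SYCL support and at least one supported SYCL device, and the main() function, variable names, and the float/int64_t choices are illustrative rather than part of the patch.

    // Minimal sketch: run a tanh expression on a SYCL device with Eigen's tensor module.
    // Assumes a SYCL-enabled Eigen toolchain; mirrors the calls used in the test above.
    #define EIGEN_USE_SYCL
    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      // Pick the first SYCL device Eigen reports and wrap it in a SyclDevice.
      auto devices = Eigen::get_sycl_supported_devices();
      Eigen::QueueInterface queue_interface(devices[0]);
      Eigen::SyclDevice sycl_device(&queue_interface);

      // Host-side input and output tensors.
      Eigen::array<int64_t, 3> range = {{4, 4, 1}};
      Eigen::Tensor<float, 3, Eigen::RowMajor, int64_t> in(range), out(range);
      in = in.random();

      // Device buffers obtained from the SyclDevice allocator, viewed as tensors.
      float* d_in  = static_cast<float*>(sycl_device.allocate(in.size() * sizeof(float)));
      float* d_out = static_cast<float*>(sycl_device.allocate(out.size() * sizeof(float)));
      Eigen::TensorMap<Eigen::Tensor<float, 3, Eigen::RowMajor, int64_t>> gpu_in(d_in, range);
      Eigen::TensorMap<Eigen::Tensor<float, 3, Eigen::RowMajor, int64_t>> gpu_out(d_out, range);

      // Copy input to the device, evaluate the expression there, copy the result back.
      sycl_device.memcpyHostToDevice(d_in, in.data(), in.size() * sizeof(float));
      gpu_out.device(sycl_device) = gpu_in.tanh();
      sycl_device.memcpyDeviceToHost(out.data(), d_out, out.size() * sizeof(float));

      sycl_device.deallocate(d_in);
      sycl_device.deallocate(d_out);
      return 0;
    }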