author     Mehdi Goli <mehdi.goli@codeplay.com>    2017-01-16 13:58:49 +0000
committer  Mehdi Goli <mehdi.goli@codeplay.com>    2017-01-16 13:58:49 +0000
commit     e46e7223817cfd982edec6d8e25c77e8e2493d78
tree       3b8345ae7bb7ab2434b117932aea51f016acf43d /unsupported/test
parent     23778a15d8570b4287820f540b719203e07cfb44
Add TensorReverseOp, TensorStridingOp, and TensorConversionOp support for SYCL; modify the SYCL tensor contraction so it can be located anywhere in the expression tree.
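For orientation, below is a minimal host-side sketch (not part of the patch) of the device-expression patterns the new tests exercise. The helper name `sketch` and the shapes are illustrative only; a SYCL-enabled Eigen build at this commit is assumed, and the allocate/memcpy calls mirror the ones used in the tests themselves.

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>

static void sketch(const Eigen::SyclDevice& sycl_device) {
  Eigen::array<Eigen::DenseIndex, 4> dims = {{2, 3, 5, 7}};
  Eigen::Tensor<float, 4> in(dims), out(dims);
  in.setRandom();

  // Stage the input on the device, as the tests do.
  float* d_in  = static_cast<float*>(sycl_device.allocate(in.size() * sizeof(float)));
  float* d_out = static_cast<float*>(sycl_device.allocate(out.size() * sizeof(float)));
  Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_in(d_in, dims);
  Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_out(d_out, dims);
  sycl_device.memcpyHostToDevice(d_in, in.data(), in.size() * sizeof(float));

  // TensorReverseOp: flip the two middle dimensions on the device.
  Eigen::array<bool, 4> rev = {{false, true, true, false}};
  gpu_out.device(sycl_device) = gpu_in.reverse(rev);

  // TensorStridingOp: identity strides copy the tensor unchanged.
  Eigen::array<Eigen::DenseIndex, 4> strides = {{1, 1, 1, 1}};
  gpu_out.device(sycl_device) = gpu_in.stride(strides);

  // TensorConversionOp: element-wise cast to another scalar type.
  int* d_cast = static_cast<int*>(sycl_device.allocate(in.size() * sizeof(int)));
  Eigen::TensorMap<Eigen::Tensor<int, 4> > gpu_cast(d_cast, dims);
  gpu_cast.device(sycl_device) = gpu_in.cast<int>();

  sycl_device.memcpyDeviceToHost(out.data(), d_out, out.size() * sizeof(float));
  sycl_device.deallocate(d_in);
  sycl_device.deallocate(d_out);
  sycl_device.deallocate(d_cast);
}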
Diffstat (limited to 'unsupported/test')
-rw-r--r--  unsupported/test/CMakeLists.txt                   |   2
-rw-r--r--  unsupported/test/cxx11_tensor_contract_sycl.cpp   |  71
-rw-r--r--  unsupported/test/cxx11_tensor_reverse_sycl.cpp    | 221
-rw-r--r--  unsupported/test/cxx11_tensor_striding_sycl.cpp   | 203
-rw-r--r--  unsupported/test/cxx11_tensor_sycl.cpp            |  32
5 files changed, 527 insertions(+), 2 deletions(-)
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index daedb671c..cbbd3efb4 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -152,6 +152,8 @@ if(EIGEN_TEST_CXX11)
ei_add_test_sycl(cxx11_tensor_builtins_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_contract_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_concatenation_sycl "-std=c++11")
+ ei_add_test_sycl(cxx11_tensor_reverse_sycl "-std=c++11")
+ ei_add_test_sycl(cxx11_tensor_striding_sycl "-std=c++11")
endif(EIGEN_TEST_SYCL)
# It should be safe to always run these tests as there is some fallback code for
# older compilers that don't support cxx11.
diff --git a/unsupported/test/cxx11_tensor_contract_sycl.cpp b/unsupported/test/cxx11_tensor_contract_sycl.cpp
index 0221da110..5dacc87f2 100644
--- a/unsupported/test/cxx11_tensor_contract_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_contract_sycl.cpp
@@ -65,10 +65,71 @@ void test_sycl_contraction(const Device& sycl_device, int m_size, int k_size, in
sycl_device.memcpyHostToDevice(d_t_right, t_right.data(),t_right_bytes);
gpu_t_result.device(sycl_device) = gpu_t_left.contract(gpu_t_right, dims);
+ sycl_device.memcpyDeviceToHost(t_result_gpu.data(), d_t_result, t_result_bytes);
+
t_result = t_left.contract(t_right, dims);
+ for (DenseIndex i = 0; i < t_result.size(); i++) {
+ if (static_cast<float>(fabs(t_result(i) - t_result_gpu(i))) < 1e-4f) {
+ continue;
+ }
+ if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), 1e-4f)) {
+ continue;
+ }
+ std::cout << "mismatch detected at index " << i << ": " << t_result(i)
+ << " vs " << t_result_gpu(i) << std::endl;
+ assert(false);
+ }
+ sycl_device.deallocate(d_t_left);
+ sycl_device.deallocate(d_t_right);
+ sycl_device.deallocate(d_t_result);
+}
+
+template<int DataLayout, typename Device>
+void test_TF(const Device& sycl_device)
+{
+ Eigen::array<long, 2> left_dims = {{2, 3}};
+ Eigen::array<long, 2> right_dims = {{3, 1}};
+ Eigen::array<long, 2> res_dims = {{2, 1}};
+ Eigen::array<DimPair, 1> dims = {{DimPair(1, 0)}};
+
+
+ Tensor<float, 2, DataLayout, long> t_left(left_dims);
+ Tensor<float, 2, DataLayout, long> t_right(right_dims);
+ Tensor<float, 2, DataLayout, long> t_result_gpu(res_dims);
+ Tensor<float, 2, DataLayout, long> t_result(res_dims);
+
+ t_left.data()[0] = 1.0f;
+ t_left.data()[1] = 2.0f;
+ t_left.data()[2] = 3.0f;
+ t_left.data()[3] = 4.0f;
+ t_left.data()[4] = 5.0f;
+ t_left.data()[5] = 6.0f;
+
+ t_right.data()[0] = -1.0f;
+ t_right.data()[1] = 0.5f;
+ t_right.data()[2] = 2.0f;
+
+ std::size_t t_left_bytes = t_left.size() * sizeof(float);
+ std::size_t t_right_bytes = t_right.size() * sizeof(float);
+ std::size_t t_result_bytes = t_result.size()*sizeof(float);
+
+
+ float * d_t_left = static_cast<float*>(sycl_device.allocate(t_left_bytes));
+ float * d_t_right = static_cast<float*>(sycl_device.allocate(t_right_bytes));
+ float * d_t_result = static_cast<float*>(sycl_device.allocate(t_result_bytes));
+
+ Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout, long> > gpu_t_left(d_t_left, left_dims);
+ Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout, long> > gpu_t_right(d_t_right, right_dims);
+ Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout, long> > gpu_t_result(d_t_result, res_dims);
+
+ sycl_device.memcpyHostToDevice(d_t_left, t_left.data(),t_left_bytes);
+ sycl_device.memcpyHostToDevice(d_t_right, t_right.data(),t_right_bytes);
+
+ gpu_t_result.device(sycl_device) = gpu_t_left.contract(gpu_t_right, dims);
sycl_device.memcpyDeviceToHost(t_result_gpu.data(), d_t_result, t_result_bytes);
+ t_result = t_left.contract(t_right, dims);
for (DenseIndex i = 0; i < t_result.size(); i++) {
if (static_cast<float>(fabs(t_result(i) - t_result_gpu(i))) < 1e-4f) {
@@ -84,9 +145,10 @@ void test_sycl_contraction(const Device& sycl_device, int m_size, int k_size, in
sycl_device.deallocate(d_t_left);
sycl_device.deallocate(d_t_right);
sycl_device.deallocate(d_t_result);
-}
+}
+
template<int DataLayout, typename Device>
void test_scalar(const Device& sycl_device, int m_size, int k_size, int n_size)
{
@@ -121,9 +183,10 @@ void test_scalar(const Device& sycl_device, int m_size, int k_size, int n_size)
sycl_device.memcpyHostToDevice(d_t_right, t_right.data(),t_right_bytes);
gpu_t_result.device(sycl_device) = gpu_t_left.contract(gpu_t_right, dims);
+ sycl_device.memcpyDeviceToHost(t_result_gpu.data(), d_t_result, t_result_bytes);
+
t_result = t_left.contract(t_right, dims);
- sycl_device.memcpyDeviceToHost(t_result_gpu.data(), d_t_result, t_result_bytes);
if (static_cast<float>(fabs(t_result() - t_result_gpu())) > 1e-4f &&
!Eigen::internal::isApprox(t_result(), t_result_gpu(), 1e-4f)) {
std::cout << "mismatch detected: " << t_result()
@@ -204,6 +267,9 @@ template <typename Dev_selector> void tensorContractionPerDevice(Dev_selector& s
test_sycl_contraction_k<RowMajor>(sycl_device);
test_sycl_contraction_sizes<ColMajor>(sycl_device);
test_sycl_contraction_sizes<RowMajor>(sycl_device);
+ test_TF<RowMajor>(sycl_device);
+ test_TF<ColMajor>(sycl_device);
+
end = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end-start;
std::time_t end_time = std::chrono::system_clock::to_time_t(end);
@@ -211,6 +277,7 @@ template <typename Dev_selector> void tensorContractionPerDevice(Dev_selector& s
<< "elapsed time: " << elapsed_seconds.count() << "s\n";
}
+
void test_cxx11_tensor_contract_sycl() {
for (const auto& device :Eigen::get_sycl_supported_devices()) {
CALL_SUBTEST(tensorContractionPerDevice(device));
diff --git a/unsupported/test/cxx11_tensor_reverse_sycl.cpp b/unsupported/test/cxx11_tensor_reverse_sycl.cpp
new file mode 100644
index 000000000..73b394c18
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_reverse_sycl.cpp
@@ -0,0 +1,221 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_reverse_sycl
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+
+template <typename DataType, int DataLayout>
+static void test_simple_reverse(const Eigen::SyclDevice& sycl_device) {
+
+ int dim1 = 2;
+ int dim2 = 3;
+ int dim3 = 5;
+ int dim4 = 7;
+
+ array<int, 4> tensorRange = {{dim1, dim2, dim3, dim4}};
+ Tensor<DataType, 4, DataLayout> tensor(tensorRange);
+ Tensor<DataType, 4, DataLayout> reversed_tensor(tensorRange);
+ tensor.setRandom();
+
+ array<bool, 4> dim_rev;
+ dim_rev[0] = false;
+ dim_rev[1] = true;
+ dim_rev[2] = true;
+ dim_rev[3] = false;
+
+ DataType* gpu_in_data = static_cast<DataType*>(sycl_device.allocate(tensor.dimensions().TotalSize()*sizeof(DataType)));
+ DataType* gpu_out_data = static_cast<DataType*>(sycl_device.allocate(reversed_tensor.dimensions().TotalSize()*sizeof(DataType)));
+
+ TensorMap<Tensor<DataType, 4, DataLayout> > in_gpu(gpu_in_data, tensorRange);
+ TensorMap<Tensor<DataType, 4, DataLayout> > out_gpu(gpu_out_data, tensorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_in_data, tensor.data(),(tensor.dimensions().TotalSize())*sizeof(DataType));
+ out_gpu.device(sycl_device) = in_gpu.reverse(dim_rev);
+ sycl_device.memcpyDeviceToHost(reversed_tensor.data(), gpu_out_data, reversed_tensor.dimensions().TotalSize()*sizeof(DataType));
+ // Check that the device-side reverse produced the expected element order.
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), reversed_tensor(i,2-j,4-k,l));
+ }
+ }
+ }
+ }
+ dim_rev[0] = true;
+ dim_rev[1] = false;
+ dim_rev[2] = false;
+ dim_rev[3] = false;
+
+ out_gpu.device(sycl_device) = in_gpu.reverse(dim_rev);
+ sycl_device.memcpyDeviceToHost(reversed_tensor.data(), gpu_out_data, reversed_tensor.dimensions().TotalSize()*sizeof(DataType));
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), reversed_tensor(1-i,j,k,l));
+ }
+ }
+ }
+ }
+
+ dim_rev[0] = true;
+ dim_rev[1] = false;
+ dim_rev[2] = false;
+ dim_rev[3] = true;
+ out_gpu.device(sycl_device) = in_gpu.reverse(dim_rev);
+ sycl_device.memcpyDeviceToHost(reversed_tensor.data(), gpu_out_data, reversed_tensor.dimensions().TotalSize()*sizeof(DataType));
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), reversed_tensor(1-i,j,k,6-l));
+ }
+ }
+ }
+ }
+
+ sycl_device.deallocate(gpu_in_data);
+ sycl_device.deallocate(gpu_out_data);
+}
+
+
+
+template <typename DataType, int DataLayout>
+static void test_expr_reverse(const Eigen::SyclDevice& sycl_device, bool LValue)
+{
+ int dim1 = 2;
+ int dim2 = 3;
+ int dim3 = 5;
+ int dim4 = 7;
+
+ array<int, 4> tensorRange = {{dim1, dim2, dim3, dim4}};
+ Tensor<DataType, 4, DataLayout> tensor(tensorRange);
+ Tensor<DataType, 4, DataLayout> expected(tensorRange);
+ Tensor<DataType, 4, DataLayout> result(tensorRange);
+ tensor.setRandom();
+
+ array<bool, 4> dim_rev;
+ dim_rev[0] = false;
+ dim_rev[1] = true;
+ dim_rev[2] = false;
+ dim_rev[3] = true;
+
+ DataType* gpu_in_data = static_cast<DataType*>(sycl_device.allocate(tensor.dimensions().TotalSize()*sizeof(DataType)));
+ DataType* gpu_out_data_expected = static_cast<DataType*>(sycl_device.allocate(expected.dimensions().TotalSize()*sizeof(DataType)));
+ DataType* gpu_out_data_result = static_cast<DataType*>(sycl_device.allocate(result.dimensions().TotalSize()*sizeof(DataType)));
+
+ TensorMap<Tensor<DataType, 4, DataLayout> > in_gpu(gpu_in_data, tensorRange);
+ TensorMap<Tensor<DataType, 4, DataLayout> > out_gpu_expected(gpu_out_data_expected, tensorRange);
+ TensorMap<Tensor<DataType, 4, DataLayout> > out_gpu_result(gpu_out_data_result, tensorRange);
+
+
+ sycl_device.memcpyHostToDevice(gpu_in_data, tensor.data(),(tensor.dimensions().TotalSize())*sizeof(DataType));
+
+ if (LValue) {
+ out_gpu_expected.reverse(dim_rev).device(sycl_device) = in_gpu;
+ } else {
+ out_gpu_expected.device(sycl_device) = in_gpu.reverse(dim_rev);
+ }
+ sycl_device.memcpyDeviceToHost(expected.data(), gpu_out_data_expected, expected.dimensions().TotalSize()*sizeof(DataType));
+
+
+ array<int, 4> src_slice_dim;
+ src_slice_dim[0] = 2;
+ src_slice_dim[1] = 3;
+ src_slice_dim[2] = 1;
+ src_slice_dim[3] = 7;
+ array<int, 4> src_slice_start;
+ src_slice_start[0] = 0;
+ src_slice_start[1] = 0;
+ src_slice_start[2] = 0;
+ src_slice_start[3] = 0;
+ array<int, 4> dst_slice_dim = src_slice_dim;
+ array<int, 4> dst_slice_start = src_slice_start;
+
+ for (int i = 0; i < 5; ++i) {
+ if (LValue) {
+ out_gpu_result.slice(dst_slice_start, dst_slice_dim).reverse(dim_rev).device(sycl_device) =
+ in_gpu.slice(src_slice_start, src_slice_dim);
+ } else {
+ out_gpu_result.slice(dst_slice_start, dst_slice_dim).device(sycl_device) =
+ in_gpu.slice(src_slice_start, src_slice_dim).reverse(dim_rev);
+ }
+ src_slice_start[2] += 1;
+ dst_slice_start[2] += 1;
+ }
+ sycl_device.memcpyDeviceToHost(result.data(), gpu_out_data_result, result.dimensions().TotalSize()*sizeof(DataType));
+
+ for (int i = 0; i < expected.dimension(0); ++i) {
+ for (int j = 0; j < expected.dimension(1); ++j) {
+ for (int k = 0; k < expected.dimension(2); ++k) {
+ for (int l = 0; l < expected.dimension(3); ++l) {
+ VERIFY_IS_EQUAL(result(i,j,k,l), expected(i,j,k,l));
+ }
+ }
+ }
+ }
+
+ dst_slice_start[2] = 0;
+ result.setRandom();
+ sycl_device.memcpyHostToDevice(gpu_out_data_result, result.data(),(result.dimensions().TotalSize())*sizeof(DataType));
+ for (int i = 0; i < 5; ++i) {
+ if (LValue) {
+ out_gpu_result.slice(dst_slice_start, dst_slice_dim).reverse(dim_rev).device(sycl_device) =
+ in_gpu.slice(dst_slice_start, dst_slice_dim);
+ } else {
+ out_gpu_result.slice(dst_slice_start, dst_slice_dim).device(sycl_device) =
+ in_gpu.reverse(dim_rev).slice(dst_slice_start, dst_slice_dim);
+ }
+ dst_slice_start[2] += 1;
+ }
+ sycl_device.memcpyDeviceToHost(result.data(), gpu_out_data_result, result.dimensions().TotalSize()*sizeof(DataType));
+
+ for (int i = 0; i < expected.dimension(0); ++i) {
+ for (int j = 0; j < expected.dimension(1); ++j) {
+ for (int k = 0; k < expected.dimension(2); ++k) {
+ for (int l = 0; l < expected.dimension(3); ++l) {
+ VERIFY_IS_EQUAL(result(i,j,k,l), expected(i,j,k,l));
+ }
+ }
+ }
+ }
+}
+
+
+
+template<typename DataType> void sycl_reverse_test_per_device(const cl::sycl::device& d){
+ std::cout << "Running on " << d.template get_info<cl::sycl::info::device::name>() << std::endl;
+ QueueInterface queueInterface(d);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_simple_reverse<DataType, RowMajor>(sycl_device);
+ test_simple_reverse<DataType, ColMajor>(sycl_device);
+ test_expr_reverse<DataType, RowMajor>(sycl_device, false);
+ test_expr_reverse<DataType, ColMajor>(sycl_device, false);
+ test_expr_reverse<DataType, RowMajor>(sycl_device, true);
+ test_expr_reverse<DataType, ColMajor>(sycl_device, true);
+}
+void test_cxx11_tensor_reverse_sycl() {
+ for (const auto& device : Eigen::get_sycl_supported_devices()) {
+ CALL_SUBTEST(sycl_reverse_test_per_device<float>(device));
+ }
+}
diff --git a/unsupported/test/cxx11_tensor_striding_sycl.cpp b/unsupported/test/cxx11_tensor_striding_sycl.cpp
new file mode 100644
index 000000000..2cbb18f1c
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_striding_sycl.cpp
@@ -0,0 +1,203 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_striding_sycl
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_SYCL
+
+#include <iostream>
+#include <chrono>
+#include <ctime>
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::array;
+using Eigen::SyclDevice;
+using Eigen::Tensor;
+using Eigen::TensorMap;
+
+
+template <typename DataType, int DataLayout, typename IndexType>
+static void test_simple_striding(const Eigen::SyclDevice& sycl_device)
+{
+
+ Eigen::array<IndexType, 4> tensor_dims = {{2,3,5,7}};
+ Eigen::array<IndexType, 4> stride_dims = {{1,1,3,3}};
+
+
+ Tensor<DataType, 4, DataLayout, IndexType> tensor(tensor_dims);
+ Tensor<DataType, 4, DataLayout,IndexType> no_stride(tensor_dims);
+ Tensor<DataType, 4, DataLayout,IndexType> stride(stride_dims);
+
+
+ std::size_t tensor_bytes = tensor.size() * sizeof(DataType);
+ std::size_t no_stride_bytes = no_stride.size() * sizeof(DataType);
+ std::size_t stride_bytes = stride.size() * sizeof(DataType);
+ DataType * d_tensor = static_cast<DataType*>(sycl_device.allocate(tensor_bytes));
+ DataType * d_no_stride = static_cast<DataType*>(sycl_device.allocate(no_stride_bytes));
+ DataType * d_stride = static_cast<DataType*>(sycl_device.allocate(stride_bytes));
+
+ Eigen::TensorMap<Eigen::Tensor<DataType, 4, DataLayout, IndexType> > gpu_tensor(d_tensor, tensor_dims);
+ Eigen::TensorMap<Eigen::Tensor<DataType, 4, DataLayout, IndexType> > gpu_no_stride(d_no_stride, tensor_dims);
+ Eigen::TensorMap<Eigen::Tensor<DataType, 4, DataLayout, IndexType> > gpu_stride(d_stride, stride_dims);
+
+
+ tensor.setRandom();
+ array<IndexType, 4> strides;
+ strides[0] = 1;
+ strides[1] = 1;
+ strides[2] = 1;
+ strides[3] = 1;
+ sycl_device.memcpyHostToDevice(d_tensor, tensor.data(), tensor_bytes);
+ gpu_no_stride.device(sycl_device)=gpu_tensor.stride(strides);
+ sycl_device.memcpyDeviceToHost(no_stride.data(), d_no_stride, no_stride_bytes);
+
+ // Host-side equivalent: no_stride = tensor.stride(strides);
+
+ VERIFY_IS_EQUAL(no_stride.dimension(0), 2);
+ VERIFY_IS_EQUAL(no_stride.dimension(1), 3);
+ VERIFY_IS_EQUAL(no_stride.dimension(2), 5);
+ VERIFY_IS_EQUAL(no_stride.dimension(3), 7);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), no_stride(i,j,k,l));
+ }
+ }
+ }
+ }
+
+ strides[0] = 2;
+ strides[1] = 4;
+ strides[2] = 2;
+ strides[3] = 3;
+// Host-side equivalent:
+//   stride = tensor.stride(strides);
+
+ gpu_stride.device(sycl_device)=gpu_tensor.stride(strides);
+ sycl_device.memcpyDeviceToHost(stride.data(), d_stride, stride_bytes);
+
+ VERIFY_IS_EQUAL(stride.dimension(0), 1);
+ VERIFY_IS_EQUAL(stride.dimension(1), 1);
+ VERIFY_IS_EQUAL(stride.dimension(2), 3);
+ VERIFY_IS_EQUAL(stride.dimension(3), 3);
+
+ for (int i = 0; i < 1; ++i) {
+ for (int j = 0; j < 1; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ for (int l = 0; l < 3; ++l) {
+ VERIFY_IS_EQUAL(tensor(2*i,4*j,2*k,3*l), stride(i,j,k,l));
+ }
+ }
+ }
+ }
+
+ sycl_device.deallocate(d_tensor);
+ sycl_device.deallocate(d_no_stride);
+ sycl_device.deallocate(d_stride);
+}
+
+template <typename DataType, int DataLayout, typename IndexType>
+static void test_striding_as_lvalue(const Eigen::SyclDevice& sycl_device)
+{
+
+ Eigen::array<IndexType, 4> tensor_dims = {{2,3,5,7}};
+ Eigen::array<IndexType, 4> stride_dims = {{3,12,10,21}};
+
+
+ Tensor<DataType, 4, DataLayout, IndexType> tensor(tensor_dims);
+ Tensor<DataType, 4, DataLayout,IndexType> no_stride(stride_dims);
+ Tensor<DataType, 4, DataLayout,IndexType> stride(stride_dims);
+
+
+ std::size_t tensor_bytes = tensor.size() * sizeof(DataType);
+ std::size_t no_stride_bytes = no_stride.size() * sizeof(DataType);
+ std::size_t stride_bytes = stride.size() * sizeof(DataType);
+
+ DataType * d_tensor = static_cast<DataType*>(sycl_device.allocate(tensor_bytes));
+ DataType * d_no_stride = static_cast<DataType*>(sycl_device.allocate(no_stride_bytes));
+ DataType * d_stride = static_cast<DataType*>(sycl_device.allocate(stride_bytes));
+
+ Eigen::TensorMap<Eigen::Tensor<DataType, 4, DataLayout, IndexType> > gpu_tensor(d_tensor, tensor_dims);
+ Eigen::TensorMap<Eigen::Tensor<DataType, 4, DataLayout, IndexType> > gpu_no_stride(d_no_stride, stride_dims);
+ Eigen::TensorMap<Eigen::Tensor<DataType, 4, DataLayout, IndexType> > gpu_stride(d_stride, stride_dims);
+
+ //Tensor<float, 4, DataLayout> tensor(2,3,5,7);
+ tensor.setRandom();
+ array<IndexType, 4> strides;
+ strides[0] = 2;
+ strides[1] = 4;
+ strides[2] = 2;
+ strides[3] = 3;
+
+// Host-side equivalent:
+//   result.stride(strides) = tensor;
+ sycl_device.memcpyHostToDevice(d_tensor, tensor.data(), tensor_bytes);
+ gpu_stride.stride(strides).device(sycl_device)=gpu_tensor;
+ sycl_device.memcpyDeviceToHost(stride.data(), d_stride, stride_bytes);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), stride(2*i,4*j,2*k,3*l));
+ }
+ }
+ }
+ }
+
+ array<IndexType, 4> no_strides;
+ no_strides[0] = 1;
+ no_strides[1] = 1;
+ no_strides[2] = 1;
+ no_strides[3] = 1;
+// Host-side equivalent:
+//   result2.stride(strides) = tensor.stride(no_strides);
+
+ gpu_no_stride.stride(strides).device(sycl_device)=gpu_tensor.stride(no_strides);
+ sycl_device.memcpyDeviceToHost(no_stride.data(), d_no_stride, no_stride_bytes);
+
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 5; ++k) {
+ for (int l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), no_stride(2*i,4*j,2*k,3*l));
+ }
+ }
+ }
+ }
+ sycl_device.deallocate(d_tensor);
+ sycl_device.deallocate(d_no_stride);
+ sycl_device.deallocate(d_stride);
+}
+
+
+template <typename Dev_selector> void tensorStridingPerDevice(Dev_selector& s){
+ QueueInterface queueInterface(s);
+ auto sycl_device=Eigen::SyclDevice(&queueInterface);
+ test_simple_striding<float, ColMajor, ptrdiff_t>(sycl_device);
+ test_simple_striding<float, RowMajor, ptrdiff_t>(sycl_device);
+ test_striding_as_lvalue<float, ColMajor, ptrdiff_t>(sycl_device);
+ test_striding_as_lvalue<float, RowMajor, ptrdiff_t>(sycl_device);
+}
+
+void test_cxx11_tensor_striding_sycl() {
+ for (const auto& device : Eigen::get_sycl_supported_devices()) {
+ CALL_SUBTEST(tensorStridingPerDevice(device));
+ }
+}
diff --git a/unsupported/test/cxx11_tensor_sycl.cpp b/unsupported/test/cxx11_tensor_sycl.cpp
index d5c0cbaad..5992a306d 100644
--- a/unsupported/test/cxx11_tensor_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_sycl.cpp
@@ -229,6 +229,36 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
sycl_device.deallocate(gpu_in3_data);
sycl_device.deallocate(gpu_out_data);
}
+template<typename Scalar1, typename Scalar2, int DataLayout>
+static void test_sycl_cast(const Eigen::SyclDevice& sycl_device){
+ int size = 20;
+ array<int, 1> tensorRange = {{size}};
+ Tensor<Scalar1, 1, DataLayout> in(tensorRange);
+ Tensor<Scalar2, 1, DataLayout> out(tensorRange);
+ Tensor<Scalar2, 1, DataLayout> out_host(tensorRange);
+
+ in.setRandom();
+
+ Scalar1* gpu_in_data = static_cast<Scalar1*>(sycl_device.allocate(in.size()*sizeof(Scalar1)));
+ Scalar2 * gpu_out_data = static_cast<Scalar2*>(sycl_device.allocate(out.size()*sizeof(Scalar2)));
+
+
+
+
+ TensorMap<Tensor<Scalar1, 1, DataLayout>> gpu_in(gpu_in_data, tensorRange);
+ TensorMap<Tensor<Scalar2, 1, DataLayout>> gpu_out(gpu_out_data, tensorRange);
+ sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.size())*sizeof(Scalar1));
+ gpu_out.device(sycl_device) = gpu_in.template cast<Scalar2>();
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data, out.size()*sizeof(Scalar2));
+ out_host = in.template cast<Scalar2>();
+ for(int i=0; i< size; i++)
+ {
+ VERIFY_IS_APPROX(out(i), out_host(i));
+ }
+ printf("cast Test Passed\n");
+ sycl_device.deallocate(gpu_in_data);
+ sycl_device.deallocate(gpu_out_data);
+}
template<typename DataType, typename dev_Selector> void sycl_computing_test_per_device(dev_Selector s){
QueueInterface queueInterface(s);
auto sycl_device = Eigen::SyclDevice(&queueInterface);
@@ -238,6 +268,8 @@ template<typename DataType, typename dev_Selector> void sycl_computing_test_per_
test_sycl_mem_transfers<DataType, ColMajor>(sycl_device);
test_sycl_computations<DataType, ColMajor>(sycl_device);
test_sycl_mem_sync<DataType, ColMajor>(sycl_device);
+ test_sycl_cast<DataType, int, RowMajor>(sycl_device);
+ test_sycl_cast<DataType, int, ColMajor>(sycl_device);
}
void test_cxx11_tensor_sycl() {