author    Mehdi Goli <mehdi.goli@codeplay.com>  2019-11-28 10:08:54 +0000
committer Mehdi Goli <mehdi.goli@codeplay.com>  2019-11-28 10:08:54 +0000
commit    00f32752f7d0b193c6788691c3cf0b76457a044d (patch)
tree      792e46110f0751ea8802fa9d403d1472d5977ac3 /unsupported/test/cxx11_tensor_morphing_sycl.cpp
parent    ea51a9eace7e4f0ea839e61eb2df85ccfb94aee8 (diff)
[SYCL] Rebasing the SYCL support branch on top of the Eigen upstream master branch.

* Unifying all loadLocalTile from lhs and rhs into an extract_block function.
* Adding the get_tensor operation which was missing in TensorContractionMapper.
* Adding the -D definition missing from CMake for the Disable_Skinny contraction operation.
* Wrapping all the indices in TensorScanSycl into a Scan parameter struct.
* Fixing a typo in Device SYCL.
* Unifying the load to private registers for the tall/skinny no-shared-memory kernel.
* Unifying the load to vector tile for the tensor-vector/vector-tensor operation.
* Removing all the LHS/RHS classes for extracting data from global memory.
* Removing Outputfunction from TensorContractionSkinnyNoshared.
* Combining the local-memory version of tall/skinny and normal tensor contraction into one kernel.
* Combining the no-local-memory version of tall/skinny and normal tensor contraction into one kernel.
* Combining general tensor-vector and vector-tensor contraction into one kernel.
* Making double buffering optional for tensor contraction when the local-memory version is used.
* Modifying the benchmark to accept custom reduction sizes.
* Disabling AVX optimization for the SYCL backend on the host to allow SSE optimization on the host.
* Adding tests for SYCL.
* Modifying the SYCL CMake configuration.
Diffstat (limited to 'unsupported/test/cxx11_tensor_morphing_sycl.cpp')
-rw-r--r--  unsupported/test/cxx11_tensor_morphing_sycl.cpp  138
1 file changed, 138 insertions(+), 0 deletions(-)
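For orientation before the diff: the new test_strided_slice_as_rhs_sycl checks that stridedSlice(start, stop, strides), evaluated on a SYCL device, extracts the same block as a plain slice when every stride is 1 and stop = start + sizes. Below is a minimal host-only sketch of that equivalence (illustrative only, not part of the patch; the 2-D shapes are arbitrary):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> t(4, 6);
  t.setRandom();

  // stridedSlice takes half-open ranges [start, stop) per dimension.
  Eigen::DSizes<Eigen::DenseIndex, 2> start(1, 2);
  Eigen::DSizes<Eigen::DenseIndex, 2> stop(3, 5);
  Eigen::DSizes<Eigen::DenseIndex, 2> strides(1, 1);
  Eigen::Tensor<float, 2> a = t.stridedSlice(start, stop, strides);

  // With unit strides this is the same block as slice(start, sizes),
  // where sizes = stop - start.
  Eigen::DSizes<Eigen::DenseIndex, 2> sizes(2, 3);
  Eigen::Tensor<float, 2> b = t.slice(start, sizes);

  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 3; ++j)
      if (a(i, j) != b(i, j)) return 1;
  std::cout << "stridedSlice matches slice" << std::endl;
  return 0;
}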
diff --git a/unsupported/test/cxx11_tensor_morphing_sycl.cpp b/unsupported/test/cxx11_tensor_morphing_sycl.cpp
index 93dabe3ec..bf001b40f 100644
--- a/unsupported/test/cxx11_tensor_morphing_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_morphing_sycl.cpp
@@ -180,6 +180,82 @@ static void test_simple_slice(const Eigen::SyclDevice &sycl_device)
sycl_device.deallocate(gpu_data3);
}
+
+template <typename DataType, int DataLayout, typename IndexType>
+static void test_strided_slice_as_rhs_sycl(const Eigen::SyclDevice &sycl_device)
+{
+ IndexType sizeDim1 = 2;
+ IndexType sizeDim2 = 3;
+ IndexType sizeDim3 = 5;
+ IndexType sizeDim4 = 7;
+ IndexType sizeDim5 = 11;
+ typedef Eigen::DSizes<IndexType, 5> Index5;
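+ // With unit strides and indicesStop = indicesStart + 1 in every dimension,
+ // the strided slice selects the single element at indicesStart and must
+ // match the plain 1x1x1x1x1 slice taken below.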
+ Index5 strides(1L,1L,1L,1L,1L);
+ Index5 indicesStart(1L,2L,3L,4L,5L);
+ Index5 indicesStop(2L,3L,4L,5L,6L);
+
+ array<IndexType, 5> tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4, sizeDim5}};
+ Tensor<DataType, 5, DataLayout, IndexType> tensor(tensorRange);
+ tensor.setRandom();
+
+ array<IndexType, 5> slice1_range = {{1, 1, 1, 1, 1}};
+ Tensor<DataType, 5, DataLayout, IndexType> slice1(slice1_range);
+ Tensor<DataType, 5, DataLayout, IndexType> slice_stride1(slice1_range);
+
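+ // Device buffers: one for the full source tensor and one for each of the
+ // two single-element results.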
+ DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(tensor.size()*sizeof(DataType)));
+ DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(slice1.size()*sizeof(DataType)));
+ DataType* gpu_data_stride2 = static_cast<DataType*>(sycl_device.allocate(slice_stride1.size()*sizeof(DataType)));
+
+ TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu1(gpu_data1, tensorRange);
+ TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu2(gpu_data2, slice1_range);
+ TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu_stride2(gpu_data_stride2, slice1_range);
+
+ Eigen::DSizes<IndexType, 5> indices(1,2,3,4,5);
+ Eigen::DSizes<IndexType, 5> sizes(1,1,1,1,1);
+ sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(), tensor.size() * sizeof(DataType));
+ gpu2.device(sycl_device) = gpu1.slice(indices, sizes);
+ sycl_device.memcpyDeviceToHost(slice1.data(), gpu_data2, slice1.size() * sizeof(DataType));
+
+ gpu_stride2.device(sycl_device) = gpu1.stridedSlice(indicesStart, indicesStop, strides);
+ sycl_device.memcpyDeviceToHost(slice_stride1.data(), gpu_data_stride2, slice_stride1.size() * sizeof(DataType));
+
+ VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5));
+ VERIFY_IS_EQUAL(slice_stride1(0,0,0,0,0), tensor(1,2,3,4,5));
+
+ array<IndexType, 5> slice2_range = {{1, 1, 2, 2, 3}};
+ Tensor<DataType, 5, DataLayout, IndexType> slice2(slice2_range);
+ Tensor<DataType, 5, DataLayout, IndexType> strideSlice2(slice2_range);
+
+ DataType* gpu_data3 = static_cast<DataType*>(sycl_device.allocate(slice2.size()*sizeof(DataType)));
+ DataType* gpu_data_stride3 = static_cast<DataType*>(sycl_device.allocate(strideSlice2.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu3(gpu_data3, slice2_range);
+ TensorMap<Tensor<DataType, 5, DataLayout, IndexType>> gpu_stride3(gpu_data_stride3, slice2_range);
+ Eigen::DSizes<IndexType, 5> indices2(1,1,3,4,5);
+ Eigen::DSizes<IndexType, 5> sizes2(1,1,2,2,3);
+ Index5 strides2(1L,1L,1L,1L,1L);
+ Index5 indicesStart2(1L,1L,3L,4L,5L);
+ Index5 indicesStop2(2L,2L,5L,6L,8L);
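+ // indicesStop2 - indicesStart2 equals sizes2 in every dimension, so the
+ // strided slice covers the same 1x1x2x2x3 block as the plain slice.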
+
+ gpu3.device(sycl_device) = gpu1.slice(indices2, sizes2);
+ sycl_device.memcpyDeviceToHost(slice2.data(), gpu_data3, slice2.size() * sizeof(DataType));
+
+ gpu_stride3.device(sycl_device) = gpu1.stridedSlice(indicesStart2, indicesStop2, strides2);
+ sycl_device.memcpyDeviceToHost(strideSlice2.data(), gpu_data_stride3, strideSlice2.size() * sizeof(DataType));
+
+ for (IndexType i = 0; i < 2; ++i) {
+ for (IndexType j = 0; j < 2; ++j) {
+ for (IndexType k = 0; k < 3; ++k) {
+ VERIFY_IS_EQUAL(slice2(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k));
+ VERIFY_IS_EQUAL(strideSlice2(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k));
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data1);
+ sycl_device.deallocate(gpu_data2);
+ sycl_device.deallocate(gpu_data_stride2);
+ sycl_device.deallocate(gpu_data3);
+ sycl_device.deallocate(gpu_data_stride3);
+}
+
template<typename DataType, int DataLayout, typename IndexType>
static void test_strided_slice_write_sycl(const Eigen::SyclDevice& sycl_device)
{
@@ -228,6 +304,65 @@ static void test_strided_slice_write_sycl(const Eigen::SyclDevice& sycl_device)
sycl_device.deallocate(gpu_data3);
}
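+// Converts a DSizes value into an Eigen::array with a (typically narrower)
+// index type, so the same buffer can be re-viewed through a TensorMap whose
+// indices are 32-bit.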
+template <typename OutIndex, typename DSizes>
+Eigen::array<OutIndex, DSizes::count> To32BitDims(const DSizes& in) {
+ Eigen::array<OutIndex, DSizes::count> out;
+ for (int i = 0; i < DSizes::count; ++i) {
+ out[i] = in[i];
+ }
+ return out;
+}
+
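+// Exercises mixed index types: the same buffers are viewed through TensorMaps
+// with 64-bit (IndexType) and 32-bit (ConvertedIndexType) indices, and a slice
+// assignment done on the device must reproduce the one done on the host.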
+template <class DataType, int DataLayout, typename IndexType, typename ConvertedIndexType>
+int run_eigen(const SyclDevice& sycl_device) {
+ using TensorI64 = Tensor<DataType, 5, DataLayout, IndexType>;
+ using TensorI32 = Tensor<DataType, 5, DataLayout, ConvertedIndexType>;
+ using TensorMI64 = TensorMap<TensorI64>;
+ using TensorMI32 = TensorMap<TensorI32>;
+ Eigen::array<IndexType, 5> tensor_range{{4, 1, 1, 1, 6}};
+ Eigen::array<IndexType, 5> slice_range{{4, 1, 1, 1, 3}};
+
+ TensorI64 out_tensor_gpu(tensor_range);
+ TensorI64 out_tensor_cpu(tensor_range);
+ out_tensor_cpu.setRandom();
+
+ TensorI64 sub_tensor(slice_range);
+ sub_tensor.setRandom();
+
+ DataType* out_gpu_data = static_cast<DataType*>(sycl_device.allocate(out_tensor_cpu.size() * sizeof(DataType)));
+ DataType* sub_gpu_data = static_cast<DataType*>(sycl_device.allocate(sub_tensor.size() * sizeof(DataType)));
+ TensorMI64 out_gpu(out_gpu_data, tensor_range);
+ TensorMI64 sub_gpu(sub_gpu_data, slice_range);
+
+ sycl_device.memcpyHostToDevice(out_gpu_data, out_tensor_cpu.data(), out_tensor_cpu.size() * sizeof(DataType));
+ sycl_device.memcpyHostToDevice(sub_gpu_data, sub_tensor.data(), sub_tensor.size() * sizeof(DataType));
+
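+ // The slice covers the last three entries of the final dimension of the
+ // 4x1x1x1x6 output tensor.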
+ Eigen::array<ConvertedIndexType, 5> slice_offset_32{{0, 0, 0, 0, 3}};
+ Eigen::array<ConvertedIndexType, 5> slice_range_32{{4, 1, 1, 1, 3}};
+ TensorMI32 out_cpu_32(out_tensor_cpu.data(), To32BitDims<ConvertedIndexType>(out_tensor_cpu.dimensions()));
+ TensorMI32 sub_cpu_32(sub_tensor.data(), To32BitDims<ConvertedIndexType>(sub_tensor.dimensions()));
+ TensorMI32 out_gpu_32(out_gpu.data(), To32BitDims<ConvertedIndexType>(out_gpu.dimensions()));
+ TensorMI32 sub_gpu_32(sub_gpu.data(), To32BitDims<ConvertedIndexType>(sub_gpu.dimensions()));
+
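+ // Perform the same slice assignment twice, on the device and on the host;
+ // the copied-back device result must match the host result element-wise.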
+ out_gpu_32.slice(slice_offset_32, slice_range_32).device(sycl_device) = sub_gpu_32;
+
+ out_cpu_32.slice(slice_offset_32, slice_range_32) = sub_cpu_32;
+
+ sycl_device.memcpyDeviceToHost(out_tensor_gpu.data(), out_gpu_data, out_tensor_cpu.size() * sizeof(DataType));
+ int has_err = 0;
+ for (IndexType i = 0; i < out_tensor_cpu.size(); ++i) {
+ auto exp = out_tensor_cpu(i);
+ auto val = out_tensor_gpu(i);
+ if (val != exp) {
+ std::cout << "#" << i << " got " << val << " but expected " << exp << std::endl;
+ has_err = 1;
+ }
+ }
+ sycl_device.deallocate(out_gpu_data);
+ sycl_device.deallocate(sub_gpu_data);
+ return has_err;
+}
+
template<typename DataType, typename dev_Selector> void sycl_morphing_test_per_device(dev_Selector s){
QueueInterface queueInterface(s);
auto sycl_device = Eigen::SyclDevice(&queueInterface);
@@ -239,6 +374,9 @@ template<typename DataType, typename dev_Selector> void sycl_morphing_test_per_d
test_reshape_as_lvalue<DataType, ColMajor, int64_t>(sycl_device);
test_strided_slice_write_sycl<DataType, ColMajor, int64_t>(sycl_device);
test_strided_slice_write_sycl<DataType, RowMajor, int64_t>(sycl_device);
+ test_strided_slice_as_rhs_sycl<DataType, ColMajor, int64_t>(sycl_device);
+ test_strided_slice_as_rhs_sycl<DataType, RowMajor, int64_t>(sycl_device);
+ run_eigen<float, RowMajor, long, int>(sycl_device);
}
EIGEN_DECLARE_TEST(cxx11_tensor_morphing_sycl)
{