author    Mehdi Goli <mehdi.goli@codeplay.com>  2017-02-20 12:11:05 +0000
committer Mehdi Goli <mehdi.goli@codeplay.com>  2017-02-20 12:11:05 +0000
commit    79ebc8f76137f151c78b4f61cd99fae62bf6c34f (patch)
tree      384d2c94a81ffc5516c78946e38c7675949d4dd5
parent    91982b91c02deb5e1ce557bbc5c96fee19c636ed (diff)
Adding Sycl backend for TensorImagePatch.h; adding Sycl backend for TensorInflation.h.
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h                    |   26
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h                     |    6
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h |   14
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h           |   18
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h           |   14
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h           |   36
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h                 |   10
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h           |   14
-rw-r--r--  unsupported/test/CMakeLists.txt                                          |    2
-rw-r--r--  unsupported/test/cxx11_tensor_image_patchOP_sycl.cpp                     | 1092
-rw-r--r--  unsupported/test/cxx11_tensor_inflation_sycl.cpp                         |  136
11 files changed, 1356 insertions(+), 12 deletions(-)
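
For context, a minimal usage sketch of what this commit enables (modelled on the new tests below; the SyclDevice is assumed to be constructed by the caller, as in the existing SYCL test fixtures):

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>

// Sketch: extract 3x3 image patches on a SYCL device. Dimensions follow the
// ColMajor convention used by the tests: {depth, rows, cols, batch}; with the
// default SAME padding there is one patch per input position, i.e. 3*5 patches.
template <typename DataType, typename IndexType>
void image_patch_sketch(const Eigen::SyclDevice& sycl_device) {
  Eigen::array<IndexType, 4> in_range  = {{2, 3, 5, 7}};
  Eigen::array<IndexType, 5> out_range = {{2, 3, 3, 3*5, 7}};
  Eigen::Tensor<DataType, 4, Eigen::ColMajor, IndexType> in(in_range);
  Eigen::Tensor<DataType, 5, Eigen::ColMajor, IndexType> out(out_range);
  in.setRandom();

  DataType* d_in  = static_cast<DataType*>(sycl_device.allocate(in.size()*sizeof(DataType)));
  DataType* d_out = static_cast<DataType*>(sycl_device.allocate(out.size()*sizeof(DataType)));
  Eigen::TensorMap<Eigen::Tensor<DataType, 4, Eigen::ColMajor, IndexType>> gpu_in(d_in, in_range);
  Eigen::TensorMap<Eigen::Tensor<DataType, 5, Eigen::ColMajor, IndexType>> gpu_out(d_out, out_range);

  sycl_device.memcpyHostToDevice(d_in, in.data(), in.size()*sizeof(DataType));
  // This expression is what the changes below make executable on the SYCL backend.
  gpu_out.device(sycl_device) = gpu_in.extract_image_patches(3, 3);
  sycl_device.memcpyDeviceToHost(out.data(), d_out, out.size()*sizeof(DataType));

  sycl_device.deallocate(d_in);
  sycl_device.deallocate(d_out);
}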
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h b/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
index 566856ed2..2fb6b84b9 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h
@@ -70,12 +70,8 @@ class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprT
DenseIndex in_row_strides, DenseIndex in_col_strides,
DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
PaddingType padding_type, Scalar padding_value)
- : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
- m_row_strides(row_strides), m_col_strides(col_strides),
- m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
- m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
- m_padding_explicit(false), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0),
- m_padding_type(padding_type), m_padding_value(padding_value) {}
+ : TensorImagePatchOp(expr, patch_rows, patch_cols, row_strides, col_strides, in_row_strides, in_col_strides, row_inflate_strides,
+ col_inflate_strides, 0, 0, 0, 0, padding_value, padding_type, false) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
DenseIndex row_strides, DenseIndex col_strides,
@@ -83,14 +79,15 @@ class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprT
DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
DenseIndex padding_top, DenseIndex padding_bottom,
DenseIndex padding_left, DenseIndex padding_right,
- Scalar padding_value)
+ Scalar padding_value, PaddingType padding_type=PADDING_VALID,
+ bool padding_explicit=true)
: m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
m_row_strides(row_strides), m_col_strides(col_strides),
m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
- m_padding_explicit(true), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
+ m_padding_explicit(padding_explicit), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
m_padding_left(padding_left), m_padding_right(padding_right),
- m_padding_type(PADDING_VALID), m_padding_value(padding_value) {}
+ m_padding_type(padding_type), m_padding_value(padding_value) {}
EIGEN_DEVICE_FUNC
DenseIndex patch_rows() const { return m_patch_rows; }
@@ -172,7 +169,7 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
- : m_impl(op.expression(), device)
+ : m_impl(op.expression(), device), m_op(op)
{
EIGEN_STATIC_ASSERT((NumDims >= 4), YOU_MADE_A_PROGRAMMING_MISTAKE);
@@ -241,6 +238,8 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
break;
default:
eigen_assert(false && "unexpected padding");
+ m_outputCols = 0; // silence the uninitialised warning
+ m_outputRows = 0; // silence the uninitialised warning
}
}
eigen_assert(m_outputRows > 0);
@@ -420,7 +419,10 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
- const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+ // required by sycl
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const XprType& xpr() const { return m_op; }
+
Index rowPaddingTop() const { return m_rowPaddingTop; }
Index colPaddingLeft() const { return m_colPaddingLeft; }
@@ -501,6 +503,8 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
Scalar m_paddingValue;
TensorEvaluator<ArgType, Device> m_impl;
+ // required for sycl
+ const XprType& m_op;
};
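
The first hunk above replaces a duplicated member-initialiser list with a C++11 delegating constructor: the implicit-padding constructor now forwards to the explicit one with zero paddings, the given padding type, and padding_explicit=false. A stand-alone sketch of the pattern (hypothetical class, not Eigen code):

// Hypothetical illustration of the delegating-constructor pattern used above.
struct Patch {
  // Implicit padding: delegate to the full constructor so member
  // initialisation exists in exactly one place.
  Patch(int rows, int cols)
      : Patch(rows, cols, /*pad=*/0, /*pad_explicit=*/false) {}
  // Explicit padding: the single constructor that initialises members.
  Patch(int rows, int cols, int pad, bool pad_explicit)
      : m_rows(rows), m_cols(cols), m_pad(pad), m_pad_explicit(pad_explicit) {}
  int m_rows, m_cols, m_pad;
  bool m_pad_explicit;
};

Keeping m_op in the evaluator and exposing it through xpr() is what lets the SYCL functor extractor (further down) read back every constructor argument of the op.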
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h b/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h
index f391fb9ee..b6bf05fed 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h
@@ -215,6 +215,12 @@ struct TensorEvaluator<const TensorInflationOp<Strides, ArgType>, Device>
EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ /// required by sycl in order to extract the accessor
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+ /// required by sycl in order to extract the functor
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Strides& functor() const { return m_strides; }
+
+
protected:
Dimensions m_dimensions;
array<Index, NumDims> m_outputStrides;
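
The accessors added here mirror the image-patch change: impl() exposes the child evaluator and functor() exposes the inflation strides, so the SYCL machinery can rebuild the expression on the device. A hedged sketch of the inflate operation this enables (device setup assumed as in the sketch above):

// Sketch: inflate a 1-D tensor by a stride of 2 on a SYCL device, i.e.
// insert zeros between consecutive elements: {a,b,c} -> {a,0,b,0,c}.
template <typename DataType, typename IndexType>
void inflate_sketch(const Eigen::SyclDevice& sycl_device) {
  Eigen::array<IndexType, 1> in_range  = {{3}};
  Eigen::array<IndexType, 1> out_range = {{5}};  // (3-1)*2 + 1
  Eigen::array<IndexType, 1> strides   = {{2}};
  Eigen::Tensor<DataType, 1, Eigen::ColMajor, IndexType> in(in_range), out(out_range);
  in.setRandom();

  DataType* d_in  = static_cast<DataType*>(sycl_device.allocate(in.size()*sizeof(DataType)));
  DataType* d_out = static_cast<DataType*>(sycl_device.allocate(out.size()*sizeof(DataType)));
  Eigen::TensorMap<Eigen::Tensor<DataType, 1, Eigen::ColMajor, IndexType>> gpu_in(d_in, in_range);
  Eigen::TensorMap<Eigen::Tensor<DataType, 1, Eigen::ColMajor, IndexType>> gpu_out(d_out, out_range);

  sycl_device.memcpyHostToDevice(d_in, in.data(), in.size()*sizeof(DataType));
  gpu_out.device(sycl_device) = gpu_in.inflate(strides);
  sycl_device.memcpyDeviceToHost(out.data(), d_out, out.size()*sizeof(DataType));

  sycl_device.deallocate(d_in);
  sycl_device.deallocate(d_out);
}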
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
index ff5097141..5b4a9af9f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
@@ -165,6 +165,20 @@ KERNELBROKERCONVERTCHIPPINGOP()
+
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorImagePatchOp
+#define KERNELBROKERCONVERTIMAGEPATCHOP(CVQual)\
+template<DenseIndex Rows, DenseIndex Cols, typename XprType>\
+struct ConvertToDeviceExpression<CVQual TensorImagePatchOp<Rows, Cols, XprType> >{\
+ typedef CVQual TensorImagePatchOp<Rows, Cols, typename ConvertToDeviceExpression<XprType>::Type> Type;\
+};
+KERNELBROKERCONVERTIMAGEPATCHOP(const)
+KERNELBROKERCONVERTIMAGEPATCHOP()
+#undef KERNELBROKERCONVERTIMAGEPATCHOP
+
+
+
+
} // namespace internal
} // namespace TensorSycl
} // namespace Eigen
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
index 6b6093fa3..57a10d06b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
@@ -385,6 +385,24 @@ SYCLTENSORCHIPPINGOPEXPR(const)
SYCLTENSORCHIPPINGOPEXPR()
#undef SYCLTENSORCHIPPINGOPEXPR
+// TensorImagePatchOp
+#define SYCLTENSORIMAGEPATCHOPEXPR(CVQual)\
+template<DenseIndex Rows, DenseIndex Cols, typename OrigXprType, typename XprType, typename... Params>\
+struct ExprConstructor<CVQual TensorImagePatchOp<Rows, Cols, OrigXprType>, CVQual TensorImagePatchOp<Rows, Cols, XprType>, Params... > {\
+ typedef ExprConstructor<OrigXprType, XprType, Params...> my_xpr_type;\
+ typedef CVQual TensorImagePatchOp<Rows, Cols, typename my_xpr_type::Type> Type;\
+ my_xpr_type xprExpr;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)\
+ : xprExpr(funcD.xprExpr, t), expr(xprExpr.expr, funcD.m_patch_rows, funcD.m_patch_cols, funcD.m_row_strides, funcD.m_col_strides,\
+ funcD.m_in_row_strides, funcD.m_in_col_strides, funcD.m_row_inflate_strides, funcD.m_col_inflate_strides, \
+ funcD.m_padding_top, funcD.m_padding_bottom, funcD.m_padding_left, funcD.m_padding_right, funcD.m_padding_value, funcD.m_padding_type, funcD.m_padding_explicit){}\
+};
+
+SYCLTENSORIMAGEPATCHOPEXPR(const)
+SYCLTENSORIMAGEPATCHOPEXPR()
+#undef SYCLTENSORIMAGEPATCHOPEXPR
// TensorLayoutSwapOp
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
index 213dd25ea..2be6f3710 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
@@ -227,6 +227,20 @@ SYCLTENSORCHIPPINGOPEXTACC()
// specialisation of the \ref ExtractAccessor struct when the node type is
+/// TensorImagePatchOp.
+#define SYCLTENSORIMAGEPATCHOPEXTACC(CVQual)\
+template<DenseIndex Rows, DenseIndex Cols, typename XprType, typename Dev>\
+struct ExtractAccessor<TensorEvaluator<CVQual TensorImagePatchOp<Rows, Cols, XprType>, Dev> >{\
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<CVQual TensorImagePatchOp<Rows, Cols, XprType>, Dev>& eval)\
+ RETURN_CPP11(AccessorConstructor::getTuple(cgh, eval.impl()))\
+};
+
+SYCLTENSORIMAGEPATCHOPEXTACC(const)
+SYCLTENSORIMAGEPATCHOPEXTACC()
+#undef SYCLTENSORIMAGEPATCHOPEXTACC
+
+
+// specialisation of the \ref ExtractAccessor struct when the node type is
/// TensorLayoutSwapOp.
#define SYCLTENSORLAYOUTSWAPOPEXTACC(CVQual)\
template<typename XprType, typename Dev>\
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
index 1506e8189..dbac01138 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
@@ -296,7 +296,7 @@ SYCLEXTRFUNCCONTRACTCONCAT(TensorConcatenationOp, axis(),)
//TensorChippingOp
#define SYCLEXTRFUNCCHIPPINGOP(CVQual)\
template<DenseIndex DimId, typename XprType, typename Device>\
-struct FunctorExtractor<TensorEvaluator<CVQual TensorChippingOp<DimId, XprType>, Device>>{\
+struct FunctorExtractor<TensorEvaluator<CVQual TensorChippingOp<DimId, XprType>, Device> >{\
FunctorExtractor<Eigen::TensorEvaluator<XprType, Device> > xprExpr;\
const DenseIndex m_dim;\
const DenseIndex m_offset;\
@@ -310,6 +310,40 @@ SYCLEXTRFUNCCHIPPINGOP(const)
SYCLEXTRFUNCCHIPPINGOP()
#undef SYCLEXTRFUNCCHIPPINGOP
+#define SYCLEXTRFUNCIMAGEPATCHOP(CVQual)\
+template<DenseIndex Rows, DenseIndex Cols, typename XprType, typename Device>\
+struct FunctorExtractor<TensorEvaluator<CVQual TensorImagePatchOp<Rows, Cols, XprType>, Device> >{\
+typedef CVQual TensorImagePatchOp<Rows, Cols, XprType> Self;\
+FunctorExtractor<Eigen::TensorEvaluator<XprType, Device> > xprExpr;\
+const DenseIndex m_patch_rows;\
+const DenseIndex m_patch_cols;\
+const DenseIndex m_row_strides;\
+const DenseIndex m_col_strides;\
+const DenseIndex m_in_row_strides;\
+const DenseIndex m_in_col_strides;\
+const DenseIndex m_row_inflate_strides;\
+const DenseIndex m_col_inflate_strides;\
+const bool m_padding_explicit;\
+const DenseIndex m_padding_top;\
+const DenseIndex m_padding_bottom;\
+const DenseIndex m_padding_left;\
+const DenseIndex m_padding_right;\
+const PaddingType m_padding_type;\
+const typename Self::Scalar m_padding_value;\
+FunctorExtractor(const TensorEvaluator<Self, Device>& expr)\
+: xprExpr(expr.impl()), m_patch_rows(expr.xpr().patch_rows()), m_patch_cols(expr.xpr().patch_cols()),\
+ m_row_strides(expr.xpr().row_strides()), m_col_strides(expr.xpr().col_strides()),\
+ m_in_row_strides(expr.xpr().in_row_strides()), m_in_col_strides(expr.xpr().in_col_strides()),\
+ m_row_inflate_strides(expr.xpr().row_inflate_strides()), m_col_inflate_strides(expr.xpr().col_inflate_strides()),\
+ m_padding_explicit(expr.xpr().padding_explicit()),m_padding_top(expr.xpr().padding_top()),\
+ m_padding_bottom(expr.xpr().padding_bottom()), m_padding_left(expr.xpr().padding_left()),\
+ m_padding_right(expr.xpr().padding_right()), m_padding_type(expr.xpr().padding_type()),\
+ m_padding_value(expr.xpr().padding_value()){}\
+};
+
+SYCLEXTRFUNCIMAGEPATCHOP(const)
+SYCLEXTRFUNCIMAGEPATCHOP()
+#undef SYCLEXTRFUNCIMAGEPATCHOP
/// template deduction function for FunctorExtractor
template <typename Evaluator>
auto inline extractFunctors(const Evaluator& evaluator)-> FunctorExtractor<Evaluator> {
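
For orientation: the FunctorExtractor specialisation above exists because a SYCL kernel cannot capture host-side references, so every constructor argument of the op is copied by value and the expression is later rebuilt on the device by ExprConstructor. A minimal stand-alone sketch of the idea (hypothetical names, not the Eigen API):

// Hypothetical sketch: flatten an op's parameters into a trivially
// copyable struct that can be shipped to a SYCL kernel.
struct ImagePatchParams {
  long patch_rows;
  long patch_cols;
};

template <typename Evaluator>
ImagePatchParams capture_params(const Evaluator& eval) {
  // eval.xpr() is the accessor this commit adds to the evaluator.
  return ImagePatchParams{eval.xpr().patch_rows(), eval.xpr().patch_cols()};
}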
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
index 15729310d..b8e658824 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
@@ -161,6 +161,16 @@ SLICESTRIDEOPLEAFCOUNT()
#undef SLICESTRIDEOPLEAFCOUNT
+#define TENSORIMAGEPATCHOPLEAFCOUNT(CVQual)\
+template<DenseIndex Rows, DenseIndex Cols, typename XprType>\
+struct LeafCount<CVQual TensorImagePatchOp<Rows, Cols, XprType> >:CategoryCount<XprType>{};
+
+
+TENSORIMAGEPATCHOPLEAFCOUNT(const)
+TENSORIMAGEPATCHOPLEAFCOUNT()
+#undef TENSORIMAGEPATCHOPLEAFCOUNT
+
+
} /// namespace TensorSycl
} /// namespace internal
} /// namespace Eigen
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
index ba0d17e0c..ab97235ae 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
@@ -221,6 +221,20 @@ SYCLSLICESTRIDEOPPLH()
#undef SYCLSLICESTRIDEOPPLH
+
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorImagePatchOp
+#define SYCLTENSORIMAGEPATCHOP(CVQual)\
+template<DenseIndex Rows, DenseIndex Cols, typename XprType, size_t N>\
+struct PlaceHolderExpression<CVQual TensorImagePatchOp<Rows, Cols, XprType>, N> {\
+ typedef CVQual TensorImagePatchOp<Rows, Cols, typename CalculateIndex <N, XprType>::ArgType> Type;\
+};
+
+SYCLTENSORIMAGEPATCHOP(const)
+SYCLTENSORIMAGEPATCHOP()
+#undef SYCLTENSORIMAGEPATCHOP
+
+
/// template deduction for \ref PlaceHolderExpression struct
template <typename Expr>
struct createPlaceHolderExpression {
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index 57580f805..282f9eb55 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -169,6 +169,8 @@ if(EIGEN_TEST_CXX11)
ei_add_test_sycl(cxx11_tensor_chipping_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_patch_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_layout_swap_sycl "-std=c++11")
+ ei_add_test_sycl(cxx11_tensor_image_patchOP_sycl "-std=c++11")
+ ei_add_test_sycl(cxx11_tensor_inflation_sycl "-std=c++11")
endif(EIGEN_TEST_SYCL)
# It should be safe to always run these tests as there is some fallback code for
# older compilers that don't support cxx11.
diff --git a/unsupported/test/cxx11_tensor_image_patchOP_sycl.cpp b/unsupported/test/cxx11_tensor_image_patchOP_sycl.cpp
new file mode 100644
index 000000000..ba6b2f15a
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_image_patchOP_sycl.cpp
@@ -0,0 +1,1092 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_image_patchOP_sycl
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+static const int DataLayout = ColMajor;
+
+template <typename DataType, typename IndexType>
+static void test_simple_image_patch_sycl(const Eigen::SyclDevice& sycl_device)
+{
+ IndexType sizeDim1 = 2;
+ IndexType sizeDim2 = 3;
+ IndexType sizeDim3 = 5;
+ IndexType sizeDim4 = 7;
+ array<IndexType, 4> tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ array<IndexType, 4> tensorRowMajorRange = {{sizeDim4, sizeDim3, sizeDim2, sizeDim1}};
+ Tensor<DataType, 4, DataLayout,IndexType> tensor_col_major(tensorColMajorRange);
+ Tensor<DataType, 4, RowMajor,IndexType> tensor_row_major(tensorRowMajorRange);
+ tensor_col_major.setRandom();
+
+ DataType* gpu_data_col_major = static_cast<DataType*>(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType)));
+ DataType* gpu_data_row_major = static_cast<DataType*>(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>> gpu_col_major(gpu_data_col_major, tensorColMajorRange);
+ TensorMap<Tensor<DataType, 4, RowMajor, IndexType>> gpu_row_major(gpu_data_row_major, tensorRowMajorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType));
+ gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout();
+ sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_col_major.size())*sizeof(DataType));
+
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(3), tensor_row_major.dimension(0));
+
+ // Single pixel patch: ColMajor
+ array<IndexType, 5> patchColMajorTensorRange={{sizeDim1, 1, 1, sizeDim2*sizeDim3, sizeDim4}};
+ Tensor<DataType, 5, DataLayout,IndexType> single_patch_col_major(patchColMajorTensorRange);
+ size_t patchTensorBuffSize =single_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_single_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_single_patch_col_major(gpu_data_single_patch_col_major, patchColMajorTensorRange);
+ gpu_single_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(1, 1);
+ sycl_device.memcpyDeviceToHost(single_patch_col_major.data(), gpu_data_single_patch_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(0), 2);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(1), 1);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(3), 3*5);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(4), 7);
+
+ // Single pixel patch: RowMajor
+ array<IndexType, 5> patchRowMajorTensorRange={{sizeDim4, sizeDim2*sizeDim3, 1, 1, sizeDim1}};
+ Tensor<DataType, 5, RowMajor,IndexType> single_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =single_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_single_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_single_patch_row_major(gpu_data_single_patch_row_major, patchRowMajorTensorRange);
+ gpu_single_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(1, 1);
+ sycl_device.memcpyDeviceToHost(single_patch_row_major.data(), gpu_data_single_patch_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(0), 7);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(1), 3*5);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(3), 1);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(4), 2);
+
+ for (IndexType i = 0; i < tensor_col_major.size(); ++i) {
+ // ColMajor
+ if (tensor_col_major.data()[i] != single_patch_col_major.data()[i]) {
+ std::cout << "Mismatch detected at index colmajor " << i << " : "
+ << tensor_col_major.data()[i] << " vs " << single_patch_col_major.data()[i]
+ << std::endl;
+ }
+ VERIFY_IS_EQUAL(single_patch_col_major.data()[i], tensor_col_major.data()[i]);
+ // RowMajor
+ if (tensor_row_major.data()[i] != single_patch_row_major.data()[i]) {
+ std::cout << "Mismatch detected at index row major" << i << " : "
+ << tensor_row_major.data()[i] << " vs "
+ << single_patch_row_major.data()[i] << std::endl;
+ }
+ VERIFY_IS_EQUAL(single_patch_row_major.data()[i],
+ tensor_row_major.data()[i]);
+ VERIFY_IS_EQUAL(tensor_col_major.data()[i], tensor_row_major.data()[i]);
+ VERIFY_IS_EQUAL(single_patch_col_major.data()[i],
+ single_patch_row_major.data()[i]);
+ }
+
+
+ // Entire image patch: ColMajor
+ patchColMajorTensorRange={{sizeDim1, sizeDim2, sizeDim3, sizeDim2*sizeDim3, sizeDim4}};
+ Tensor<DataType, 5, DataLayout,IndexType> entire_image_patch_col_major(patchColMajorTensorRange);
+ patchTensorBuffSize =entire_image_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_entire_image_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_entire_image_patch_col_major(gpu_data_entire_image_patch_col_major, patchColMajorTensorRange);
+ gpu_entire_image_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(3, 5);
+ sycl_device.memcpyDeviceToHost(entire_image_patch_col_major.data(), gpu_data_entire_image_patch_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(0), 2);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(1), 3);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(2), 5);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(3), 3*5);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(4), 7);
+
+ // Entire image patch: RowMajor
+ patchRowMajorTensorRange={{sizeDim4, sizeDim2*sizeDim3, sizeDim3, sizeDim2, sizeDim1}};
+ Tensor<DataType, 5, RowMajor,IndexType> entire_image_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =entire_image_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_entire_image_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_entire_image_patch_row_major(gpu_data_entire_image_patch_row_major, patchRowMajorTensorRange);
+ gpu_entire_image_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(3, 5);
+ sycl_device.memcpyDeviceToHost(entire_image_patch_row_major.data(), gpu_data_entire_image_patch_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(0), 7);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(1), 3*5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(2), 5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(3), 3);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(4), 2);
+
+ for (IndexType i = 0; i < 3; ++i) {
+ for (IndexType j = 0; j < 5; ++j) {
+ IndexType patchId = i+3*j;
+ for (IndexType r = 0; r < 3; ++r) {
+ for (IndexType c = 0; c < 5; ++c) {
+ for (IndexType d = 0; d < 2; ++d) {
+ for (IndexType b = 0; b < 7; ++b) {
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ if (r-1+i >= 0 && c-2+j >= 0 && r-1+i < 3 && c-2+j < 5) {
+ expected_col_major = tensor_col_major(d, r-1+i, c-2+j, b);
+ expected_row_major = tensor_row_major(b, c-2+j, r-1+i, d);
+ }
+ // ColMajor
+ if (entire_image_patch_col_major(d, r, c, patchId, b) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(entire_image_patch_col_major(d, r, c, patchId, b), expected_col_major);
+ // RowMajor
+ if (entire_image_patch_row_major(b, patchId, c, r, d) !=
+ expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j
+ << " r=" << r << " c=" << c << " d=" << d << " b=" << b
+ << std::endl;
+ }
+ VERIFY_IS_EQUAL(entire_image_patch_row_major(b, patchId, c, r, d),
+ expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // 2D patch: ColMajor
+ patchColMajorTensorRange={{sizeDim1, 2, 2, sizeDim2*sizeDim3, sizeDim4}};
+ Tensor<DataType, 5, DataLayout,IndexType> twod_patch_col_major(patchColMajorTensorRange);
+ patchTensorBuffSize =twod_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_twod_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_twod_patch_col_major(gpu_data_twod_patch_col_major, patchColMajorTensorRange);
+ gpu_twod_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(2, 2);
+ sycl_device.memcpyDeviceToHost(twod_patch_col_major.data(), gpu_data_twod_patch_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(0), 2);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(1), 2);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(3), 3*5);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(4), 7);
+
+ // 2D patch: RowMajor
+ patchRowMajorTensorRange={{sizeDim4, sizeDim2*sizeDim3, 2, 2, sizeDim1}};
+ Tensor<DataType, 5, RowMajor,IndexType> twod_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =twod_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_twod_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_twod_patch_row_major(gpu_data_twod_patch_row_major, patchRowMajorTensorRange);
+ gpu_twod_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(2, 2);
+ sycl_device.memcpyDeviceToHost(twod_patch_row_major.data(), gpu_data_twod_patch_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(0), 7);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(1), 3*5);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(3), 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(4), 2);
+
+
+ // Based on the calculation described in TensorTraits.h, padding happens to be 0.
+ IndexType row_padding = 0;
+ IndexType col_padding = 0;
+ IndexType stride = 1;
+
+ for (IndexType i = 0; i < 3; ++i) {
+ for (IndexType j = 0; j < 5; ++j) {
+ IndexType patchId = i+3*j;
+ for (IndexType r = 0; r < 2; ++r) {
+ for (IndexType c = 0; c < 2; ++c) {
+ for (IndexType d = 0; d < 2; ++d) {
+ for (IndexType b = 0; b < 7; ++b) {
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ IndexType row_offset = r*stride + i - row_padding;
+ IndexType col_offset = c*stride + j - col_padding;
+ // ColMajor
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_col_major.dimension(1) && col_offset < tensor_col_major.dimension(2)) {
+ expected_col_major = tensor_col_major(d, row_offset, col_offset, b);
+ }
+ if (twod_patch_col_major(d, r, c, patchId, b) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(twod_patch_col_major(d, r, c, patchId, b), expected_col_major);
+
+ // RowMajor
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_row_major.dimension(2) && col_offset < tensor_row_major.dimension(1)) {
+ expected_row_major = tensor_row_major(b, col_offset, row_offset, d);
+
+ }
+ if (twod_patch_row_major(b, patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(twod_patch_row_major(b, patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ sycl_device.deallocate(gpu_data_col_major);
+ sycl_device.deallocate(gpu_data_row_major);
+ sycl_device.deallocate(gpu_data_single_patch_col_major);
+ sycl_device.deallocate(gpu_data_single_patch_row_major);
+ sycl_device.deallocate(gpu_data_entire_image_patch_col_major);
+ sycl_device.deallocate(gpu_data_entire_image_patch_row_major);
+ sycl_device.deallocate(gpu_data_twod_patch_col_major);
+ sycl_device.deallocate(gpu_data_twod_patch_row_major);
+
+}
+
+
+// Verifies VALID padding (no padding) with incrementing values.
+template <typename DataType, typename IndexType>
+static void test_patch_padding_valid_sycl(const Eigen::SyclDevice& sycl_device){
+ IndexType input_depth = 3;
+ IndexType input_rows = 3;
+ IndexType input_cols = 3;
+ IndexType input_batches = 1;
+ IndexType ksize = 2; // Corresponds to the Rows and Cols for tensor.extract_image_patches<>.
+ IndexType stride = 2; // Only same stride is supported.
+
+ array<IndexType, 4> tensorColMajorRange = {{input_depth, input_rows, input_cols, input_batches}};
+ array<IndexType, 4> tensorRowMajorRange = {{input_batches, input_cols, input_rows, input_depth}};
+ Tensor<DataType, 4, DataLayout,IndexType> tensor_col_major(tensorColMajorRange);
+ Tensor<DataType, 4, RowMajor,IndexType> tensor_row_major(tensorRowMajorRange);
+
+ DataType* gpu_data_col_major = static_cast<DataType*>(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType)));
+ DataType* gpu_data_row_major = static_cast<DataType*>(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>> gpu_col_major(gpu_data_col_major, tensorColMajorRange);
+ TensorMap<Tensor<DataType, 4, RowMajor, IndexType>> gpu_row_major(gpu_data_row_major, tensorRowMajorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType));
+ gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout();
+ sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_col_major.size())*sizeof(DataType));
+
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(3), tensor_row_major.dimension(0));
+
+ // Initializes tensor with incrementing numbers.
+ for (IndexType i = 0; i < tensor_col_major.size(); ++i) {
+ tensor_col_major.data()[i] = i + 1;
+ }
+ // ColMajor
+ array<IndexType, 5> patchColMajorTensorRange={{input_depth, ksize, ksize, 1, input_batches}};
+ Tensor<DataType, 5, DataLayout,IndexType> result_col_major(patchColMajorTensorRange);
+ size_t patchTensorBuffSize =result_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_result_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_result_col_major(gpu_data_result_col_major, patchColMajorTensorRange);
+ gpu_result_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID);
+ sycl_device.memcpyDeviceToHost(result_col_major.data(), gpu_data_result_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(result_col_major.dimension(0), input_depth); // depth
+ VERIFY_IS_EQUAL(result_col_major.dimension(1), ksize); // kernel rows
+ VERIFY_IS_EQUAL(result_col_major.dimension(2), ksize); // kernel cols
+ VERIFY_IS_EQUAL(result_col_major.dimension(3), 1); // number of patches
+ VERIFY_IS_EQUAL(result_col_major.dimension(4), input_batches); // number of batches
+
+ // RowMajor
+ array<IndexType, 5> patchRowMajorTensorRange={{input_batches, 1, ksize, ksize, input_depth }};
+ Tensor<DataType, 5, RowMajor,IndexType> result_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =result_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_result_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_result_row_major(gpu_data_result_row_major, patchRowMajorTensorRange);
+ gpu_result_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID);
+ sycl_device.memcpyDeviceToHost(result_row_major.data(), gpu_data_result_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(result_col_major.dimension(0), result_row_major.dimension(4));
+ VERIFY_IS_EQUAL(result_col_major.dimension(1), result_row_major.dimension(3));
+ VERIFY_IS_EQUAL(result_col_major.dimension(2), result_row_major.dimension(2));
+ VERIFY_IS_EQUAL(result_col_major.dimension(3), result_row_major.dimension(1));
+ VERIFY_IS_EQUAL(result_col_major.dimension(4), result_row_major.dimension(0));
+
+ // No padding is carried out.
+ IndexType row_padding = 0;
+ IndexType col_padding = 0;
+
+ for (IndexType i = 0; (i+stride+ksize-1) <= input_rows; i += stride) { // input rows
+ for (IndexType j = 0; (j+stride+ksize-1) <= input_cols; j += stride) { // input cols
+ IndexType patchId = i+input_rows*j;
+ for (IndexType r = 0; r < ksize; ++r) { // patch rows
+ for (IndexType c = 0; c < ksize; ++c) { // patch cols
+ for (IndexType d = 0; d < input_depth; ++d) { // depth
+ for (IndexType b = 0; b < input_batches; ++b) { // batch
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ IndexType row_offset = r + i - row_padding;
+ IndexType col_offset = c + j - col_padding;
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) {
+ expected_col_major = tensor_col_major(d, row_offset, col_offset, b);
+ expected_row_major = tensor_row_major(b, col_offset, row_offset, d);
+ }
+ // ColMajor
+ if (result_col_major(d, r, c, patchId, b) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_col_major(d, r, c, patchId, b), expected_col_major);
+ // RowMajor
+ if (result_row_major(b, patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data_col_major);
+ sycl_device.deallocate(gpu_data_row_major);
+ sycl_device.deallocate(gpu_data_result_col_major);
+ sycl_device.deallocate(gpu_data_result_row_major);
+}
+
+// Verifies VALID padding (no padding) with the same value.
+template <typename DataType, typename IndexType>
+static void test_patch_padding_valid_same_value_sycl(const Eigen::SyclDevice& sycl_device){
+ IndexType input_depth = 1;
+ IndexType input_rows = 5;
+ IndexType input_cols = 5;
+ IndexType input_batches = 2;
+ IndexType ksize = 3; // Corresponds to the Rows and Cols for tensor.extract_image_patches<>.
+ IndexType stride = 2; // Only same stride is supported.
+ // ColMajor
+
+ array<IndexType, 4> tensorColMajorRange = {{input_depth, input_rows, input_cols, input_batches}};
+ array<IndexType, 4> tensorRowMajorRange = {{input_batches, input_cols, input_rows, input_depth}};
+ Tensor<DataType, 4, DataLayout,IndexType> tensor_col_major(tensorColMajorRange);
+ Tensor<DataType, 4, RowMajor,IndexType> tensor_row_major(tensorRowMajorRange);
+
+ DataType* gpu_data_col_major = static_cast<DataType*>(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType)));
+ DataType* gpu_data_row_major = static_cast<DataType*>(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>> gpu_col_major(gpu_data_col_major, tensorColMajorRange);
+ TensorMap<Tensor<DataType, 4, RowMajor, IndexType>> gpu_row_major(gpu_data_row_major, tensorRowMajorRange);
+ gpu_col_major.device(sycl_device)=gpu_col_major.constant(11.0f);
+ gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout();
+ sycl_device.memcpyDeviceToHost(tensor_col_major.data(), gpu_data_col_major, (tensor_col_major.size())*sizeof(DataType));
+ sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_row_major.size())*sizeof(DataType));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(3), tensor_row_major.dimension(0));
+
+ array<IndexType, 5> patchColMajorTensorRange={{input_depth, ksize, ksize, 4, input_batches}};
+ Tensor<DataType, 5, DataLayout,IndexType> result_col_major(patchColMajorTensorRange);
+ size_t patchTensorBuffSize =result_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_result_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_result_col_major(gpu_data_result_col_major, patchColMajorTensorRange);
+ gpu_result_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID);
+ sycl_device.memcpyDeviceToHost(result_col_major.data(), gpu_data_result_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(result_col_major.dimension(0), input_depth); // depth
+ VERIFY_IS_EQUAL(result_col_major.dimension(1), ksize); // kernel rows
+ VERIFY_IS_EQUAL(result_col_major.dimension(2), ksize); // kernel cols
+ VERIFY_IS_EQUAL(result_col_major.dimension(3), 4); // number of patches
+ VERIFY_IS_EQUAL(result_col_major.dimension(4), input_batches); // number of batches
+
+ // RowMajor
+ array<IndexType, 5> patchRowMajorTensorRange={{input_batches, 4, ksize, ksize, input_depth }};
+ Tensor<DataType, 5, RowMajor,IndexType> result_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =result_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_result_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_result_row_major(gpu_data_result_row_major, patchRowMajorTensorRange);
+ gpu_result_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID);
+ sycl_device.memcpyDeviceToHost(result_row_major.data(), gpu_data_result_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(result_col_major.dimension(0), result_row_major.dimension(4));
+ VERIFY_IS_EQUAL(result_col_major.dimension(1), result_row_major.dimension(3));
+ VERIFY_IS_EQUAL(result_col_major.dimension(2), result_row_major.dimension(2));
+ VERIFY_IS_EQUAL(result_col_major.dimension(3), result_row_major.dimension(1));
+ VERIFY_IS_EQUAL(result_col_major.dimension(4), result_row_major.dimension(0));
+
+ // No padding is carried out.
+ IndexType row_padding = 0;
+ IndexType col_padding = 0;
+
+ for (IndexType i = 0; (i+stride+ksize-1) <= input_rows; i += stride) { // input rows
+ for (IndexType j = 0; (j+stride+ksize-1) <= input_cols; j += stride) { // input cols
+ IndexType patchId = i+input_rows*j;
+ for (IndexType r = 0; r < ksize; ++r) { // patch rows
+ for (IndexType c = 0; c < ksize; ++c) { // patch cols
+ for (IndexType d = 0; d < input_depth; ++d) { // depth
+ for (IndexType b = 0; b < input_batches; ++b) { // batch
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ IndexType row_offset = r + i - row_padding;
+ IndexType col_offset = c + j - col_padding;
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) {
+ expected_col_major = tensor_col_major(d, row_offset, col_offset, b);
+ expected_row_major = tensor_row_major(b, col_offset, row_offset, d);
+ }
+ // ColMajor
+ if (result_col_major(d, r, c, patchId, b) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_col_major(d, r, c, patchId, b), expected_col_major);
+ // RowMajor
+ if (result_row_major(b, patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data_col_major);
+ sycl_device.deallocate(gpu_data_row_major);
+ sycl_device.deallocate(gpu_data_result_col_major);
+ sycl_device.deallocate(gpu_data_result_row_major);
+}
+
+// Verifies SAME padding.
+template <typename DataType, typename IndexType>
+static void test_patch_padding_same_sycl(const Eigen::SyclDevice& sycl_device){
+ IndexType input_depth = 3;
+ IndexType input_rows = 4;
+ IndexType input_cols = 2;
+ IndexType input_batches = 1;
+ IndexType ksize = 2; // Corresponds to the Rows and Cols for tensor.extract_image_patches<>.
+ IndexType stride = 2; // Only same stride is supported.
+
+ // ColMajor
+ array<IndexType, 4> tensorColMajorRange = {{input_depth, input_rows, input_cols, input_batches}};
+ array<IndexType, 4> tensorRowMajorRange = {{input_batches, input_cols, input_rows, input_depth}};
+ Tensor<DataType, 4, DataLayout,IndexType> tensor_col_major(tensorColMajorRange);
+ Tensor<DataType, 4, RowMajor,IndexType> tensor_row_major(tensorRowMajorRange);
+
+ DataType* gpu_data_col_major = static_cast<DataType*>(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType)));
+ DataType* gpu_data_row_major = static_cast<DataType*>(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>> gpu_col_major(gpu_data_col_major, tensorColMajorRange);
+ TensorMap<Tensor<DataType, 4, RowMajor, IndexType>> gpu_row_major(gpu_data_row_major, tensorRowMajorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType));
+ gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout();
+ sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_col_major.size())*sizeof(DataType));
+
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(3));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(3), tensor_row_major.dimension(0));
+
+ // Initializes tensor with incrementing numbers.
+ for (IndexType i = 0; i < tensor_col_major.size(); ++i) {
+ tensor_col_major.data()[i] = i + 1;
+ }
+
+ array<IndexType, 5> patchColMajorTensorRange={{input_depth, ksize, ksize, 2, input_batches}};
+ Tensor<DataType, 5, DataLayout,IndexType> result_col_major(patchColMajorTensorRange);
+ size_t patchTensorBuffSize =result_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_result_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_result_col_major(gpu_data_result_col_major, patchColMajorTensorRange);
+ gpu_result_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(ksize, ksize, stride, stride, PADDING_SAME);
+ sycl_device.memcpyDeviceToHost(result_col_major.data(), gpu_data_result_col_major, patchTensorBuffSize);
+
+
+ VERIFY_IS_EQUAL(result_col_major.dimension(0), input_depth); // depth
+ VERIFY_IS_EQUAL(result_col_major.dimension(1), ksize); // kernel rows
+ VERIFY_IS_EQUAL(result_col_major.dimension(2), ksize); // kernel cols
+ VERIFY_IS_EQUAL(result_col_major.dimension(3), 2); // number of patches
+ VERIFY_IS_EQUAL(result_col_major.dimension(4), input_batches); // number of batches
+
+ // RowMajor
+
+ array<IndexType, 5> patchRowMajorTensorRange={{input_batches, 2, ksize, ksize, input_depth }};
+ Tensor<DataType, 5, RowMajor,IndexType> result_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =result_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_result_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_result_row_major(gpu_data_result_row_major, patchRowMajorTensorRange);
+ gpu_result_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(ksize, ksize, stride, stride, PADDING_SAME);
+ sycl_device.memcpyDeviceToHost(result_row_major.data(), gpu_data_result_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(result_col_major.dimension(0), result_row_major.dimension(4));
+ VERIFY_IS_EQUAL(result_col_major.dimension(1), result_row_major.dimension(3));
+ VERIFY_IS_EQUAL(result_col_major.dimension(2), result_row_major.dimension(2));
+ VERIFY_IS_EQUAL(result_col_major.dimension(3), result_row_major.dimension(1));
+ VERIFY_IS_EQUAL(result_col_major.dimension(4), result_row_major.dimension(0));
+
+ // Based on the calculation described in TensorTraits.h, padding happens to be 0.
+ IndexType row_padding = 0;
+ IndexType col_padding = 0;
+
+ for (IndexType i = 0; (i+stride+ksize-1) <= input_rows; i += stride) { // input rows
+ for (IndexType j = 0; (j+stride+ksize-1) <= input_cols; j += stride) { // input cols
+ IndexType patchId = i+input_rows*j;
+ for (IndexType r = 0; r < ksize; ++r) { // patch rows
+ for (IndexType c = 0; c < ksize; ++c) { // patch cols
+ for (IndexType d = 0; d < input_depth; ++d) { // depth
+ for (IndexType b = 0; b < input_batches; ++b) { // batch
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ IndexType row_offset = r*stride + i - row_padding;
+ IndexType col_offset = c*stride + j - col_padding;
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) {
+ expected_col_major = tensor_col_major(d, row_offset, col_offset, b);
+ expected_row_major = tensor_row_major(b, col_offset, row_offset, d);
+ }
+ // ColMajor
+ if (result_col_major(d, r, c, patchId, b) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_col_major(d, r, c, patchId, b), expected_col_major);
+ // RowMajor
+ if (result_row_major(b, patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data_col_major);
+ sycl_device.deallocate(gpu_data_row_major);
+ sycl_device.deallocate(gpu_data_result_col_major);
+ sycl_device.deallocate(gpu_data_result_row_major);
+}
+
+
+template <typename DataType, typename IndexType>
+static void test_patch_no_extra_dim_sycl(const Eigen::SyclDevice& sycl_device){
+
+ IndexType sizeDim1 = 2;
+ IndexType sizeDim2 = 3;
+ IndexType sizeDim3 = 5;
+
+ // ColMajor
+ array<IndexType, 3> tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3}};
+ array<IndexType, 3> tensorRowMajorRange = {{sizeDim3, sizeDim2, sizeDim1}};
+ Tensor<DataType, 3, DataLayout,IndexType> tensor_col_major(tensorColMajorRange);
+ tensor_col_major.setRandom();
+ Tensor<DataType, 3, RowMajor,IndexType> tensor_row_major(tensorRowMajorRange);
+
+ DataType* gpu_data_col_major = static_cast<DataType*>(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType)));
+ DataType* gpu_data_row_major = static_cast<DataType*>(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 3, ColMajor, IndexType>> gpu_col_major(gpu_data_col_major, tensorColMajorRange);
+ TensorMap<Tensor<DataType, 3, RowMajor, IndexType>> gpu_row_major(gpu_data_row_major, tensorRowMajorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType));
+ gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout();
+ sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_row_major.size())*sizeof(DataType));
+
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(2));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(1));
+ VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(0));
+
+
+ // Single pixel patch: ColMajor
+ array<IndexType, 4> patchColMajorTensorRange={{sizeDim1, 1, 1, sizeDim2*sizeDim3}};
+ Tensor<DataType, 4, DataLayout,IndexType> single_patch_col_major(patchColMajorTensorRange);
+ size_t patchTensorBuffSize =single_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_single_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, DataLayout,IndexType>> gpu_single_patch_col_major(gpu_data_single_patch_col_major, patchColMajorTensorRange);
+ gpu_single_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(1, 1);
+ sycl_device.memcpyDeviceToHost(single_patch_col_major.data(), gpu_data_single_patch_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(0), sizeDim1);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(1), 1);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_patch_col_major.dimension(3), sizeDim2*sizeDim3);
+
+ // Single pixel patch: RowMajor
+ array<IndexType, 4> patchRowMajorTensorRange={{sizeDim2*sizeDim3, 1, 1, sizeDim1}};
+ Tensor<DataType, 4, RowMajor,IndexType> single_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =single_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_single_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, RowMajor,IndexType>> gpu_single_patch_row_major(gpu_data_single_patch_row_major, patchRowMajorTensorRange);
+ gpu_single_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(1, 1);
+ sycl_device.memcpyDeviceToHost(single_patch_row_major.data(), gpu_data_single_patch_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(0), sizeDim2*sizeDim3);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(1), 1);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(2), 1);
+ VERIFY_IS_EQUAL(single_patch_row_major.dimension(3), sizeDim1);
+
+ for (IndexType i = 0; i < tensor_col_major.size(); ++i) {
+ // ColMajor
+ if (tensor_col_major.data()[i] != single_patch_col_major.data()[i]) {
+ std::cout << "Mismatch detected at index " << i << " : " << tensor_col_major.data()[i] << " vs " << single_patch_col_major.data()[i] << std::endl;
+ }
+ VERIFY_IS_EQUAL(single_patch_col_major.data()[i], tensor_col_major.data()[i]);
+ // RowMajor
+ if (tensor_row_major.data()[i] != single_patch_row_major.data()[i]) {
+ std::cout << "Mismatch detected at index " << i << " : "
+ << tensor_row_major.data()[i] << " vs "
+ << single_patch_row_major.data()[i] << std::endl;
+ }
+ VERIFY_IS_EQUAL(single_patch_row_major.data()[i],
+ tensor_row_major.data()[i]);
+ VERIFY_IS_EQUAL(tensor_col_major.data()[i], tensor_row_major.data()[i]);
+ VERIFY_IS_EQUAL(single_patch_col_major.data()[i],
+ single_patch_row_major.data()[i]);
+ }
+
+ // Entire image patch: ColMajor
+ patchColMajorTensorRange={{sizeDim1, sizeDim2, sizeDim3, sizeDim2*sizeDim3}};
+ Tensor<DataType, 4, DataLayout,IndexType> entire_image_patch_col_major(patchColMajorTensorRange);
+ patchTensorBuffSize =entire_image_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_entire_image_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, DataLayout,IndexType>> gpu_entire_image_patch_col_major(gpu_data_entire_image_patch_col_major, patchColMajorTensorRange);
+ gpu_entire_image_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(3, 5);
+ sycl_device.memcpyDeviceToHost(entire_image_patch_col_major.data(), gpu_data_entire_image_patch_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(0), 2);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(1), 3);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(2), 5);
+ VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(3), 3*5);
+
+ // Entire image patch: RowMajor
+ patchRowMajorTensorRange={{sizeDim2*sizeDim3, sizeDim3, sizeDim2, sizeDim1}};
+ Tensor<DataType, 4, RowMajor,IndexType> entire_image_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =entire_image_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_entire_image_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, RowMajor,IndexType>> gpu_entire_image_patch_row_major(gpu_data_entire_image_patch_row_major, patchRowMajorTensorRange);
+ gpu_entire_image_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(3, 5);
+ sycl_device.memcpyDeviceToHost(entire_image_patch_row_major.data(), gpu_data_entire_image_patch_row_major, patchTensorBuffSize);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(0), 3*5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(1), 5);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(2), 3);
+ VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(3), 2);
+
+ for (IndexType i = 0; i < 3; ++i) {
+ for (IndexType j = 0; j < 5; ++j) {
+ IndexType patchId = i+3*j;
+ for (IndexType r = 0; r < 3; ++r) {
+ for (IndexType c = 0; c < 5; ++c) {
+ for (IndexType d = 0; d < 2; ++d) {
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ if (r-1+i >= 0 && c-2+j >= 0 && r-1+i < 3 && c-2+j < 5) {
+ expected_col_major = tensor_col_major(d, r-1+i, c-2+j);
+ expected_row_major = tensor_row_major(c-2+j, r-1+i, d);
+ }
+ // ColMajor
+ if (entire_image_patch_col_major(d, r, c, patchId) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl;
+ }
+ VERIFY_IS_EQUAL(entire_image_patch_col_major(d, r, c, patchId), expected_col_major);
+ // RowMajor
+ if (entire_image_patch_row_major(patchId, c, r, d) !=
+ expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl;
+ }
+ VERIFY_IS_EQUAL(entire_image_patch_row_major(patchId, c, r, d),
+ expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+
+ // 2D patch: ColMajor
+ patchColMajorTensorRange={{sizeDim1, 2, 2, sizeDim2*sizeDim3}};
+ Tensor<DataType, 4, DataLayout,IndexType> twod_patch_col_major(patchColMajorTensorRange);
+ patchTensorBuffSize =twod_patch_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_twod_patch_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, DataLayout,IndexType>> gpu_twod_patch_col_major(gpu_data_twod_patch_col_major, patchColMajorTensorRange);
+ gpu_twod_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(2, 2);
+ sycl_device.memcpyDeviceToHost(twod_patch_col_major.data(), gpu_data_twod_patch_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(0), 2);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(1), 2);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch_col_major.dimension(3), 3*5);
+
+ // 2D patch: RowMajor
+ patchRowMajorTensorRange={{sizeDim2*sizeDim3, 2, 2, sizeDim1}};
+ Tensor<DataType, 4, RowMajor,IndexType> twod_patch_row_major(patchRowMajorTensorRange);
+ patchTensorBuffSize =twod_patch_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_twod_patch_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, RowMajor,IndexType>> gpu_twod_patch_row_major(gpu_data_twod_patch_row_major, patchRowMajorTensorRange);
+ gpu_twod_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(2, 2);
+ sycl_device.memcpyDeviceToHost(twod_patch_row_major.data(), gpu_data_twod_patch_row_major, patchTensorBuffSize);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(0), 3*5);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(1), 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(2), 2);
+ VERIFY_IS_EQUAL(twod_patch_row_major.dimension(3), 2);
+
+ // Based on the calculation described in TensorTraits.h, padding happens to be 0.
+ IndexType row_padding = 0;
+ IndexType col_padding = 0;
+ IndexType stride = 1;
+
+ for (IndexType i = 0; i < 3; ++i) {
+ for (IndexType j = 0; j < 5; ++j) {
+      IndexType patchId = i+3*j;
+ for (IndexType r = 0; r < 2; ++r) {
+ for (IndexType c = 0; c < 2; ++c) {
+ for (IndexType d = 0; d < 2; ++d) {
+ DataType expected_col_major = 0.0f;
+ DataType expected_row_major = 0.0f;
+ IndexType row_offset = r*stride + i - row_padding;
+ IndexType col_offset = c*stride + j - col_padding;
+ // ColMajor
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_col_major.dimension(1) && col_offset < tensor_col_major.dimension(2)) {
+ expected_col_major = tensor_col_major(d, row_offset, col_offset);
+ }
+ if (twod_patch_col_major(d, r, c, patchId) != expected_col_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl;
+ }
+ VERIFY_IS_EQUAL(twod_patch_col_major(d, r, c, patchId), expected_col_major);
+ // RowMajor
+ if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_row_major.dimension(1) && col_offset < tensor_row_major.dimension(0)) {
+ expected_row_major = tensor_row_major(col_offset, row_offset, d);
+ }
+ if (twod_patch_row_major(patchId, c, r, d) != expected_row_major) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl;
+ }
+ VERIFY_IS_EQUAL(twod_patch_row_major(patchId, c, r, d), expected_row_major);
+ // Check that ColMajor and RowMajor agree.
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major);
+ }
+ }
+ }
+ }
+ }
+
+ sycl_device.deallocate(gpu_data_col_major);
+ sycl_device.deallocate(gpu_data_row_major);
+ sycl_device.deallocate(gpu_data_single_patch_col_major);
+ sycl_device.deallocate(gpu_data_single_patch_row_major);
+ sycl_device.deallocate(gpu_data_entire_image_patch_col_major);
+ sycl_device.deallocate(gpu_data_entire_image_patch_row_major);
+ sycl_device.deallocate(gpu_data_twod_patch_col_major);
+ sycl_device.deallocate(gpu_data_twod_patch_row_major);
+}
+
+template <typename DataType, typename IndexType>
+static void test_imagenet_patches_sycl(const Eigen::SyclDevice& sycl_device)
+{
+ // Test the code on typical configurations used by the 'imagenet' benchmarks at
+ // https://github.com/soumith/convnet-benchmarks
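+  // The configurations exercised below are (channels, rows, cols, batch) =
+  // (3, 128, 128, 16) with 11x11 patches, (16, 64, 64, 32) with 9x9,
+  // (32, 16, 16, 32) with 7x7, and (64, 13, 13, 32) with 3x3.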
+ // ColMajor
+ IndexType sizeDim1 = 3;
+ IndexType sizeDim2 = 128;
+ IndexType sizeDim3 = 128;
+ IndexType sizeDim4 = 16;
+ array<IndexType, 4> tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ Tensor<DataType, 4, DataLayout,IndexType> l_in_col_major(tensorColMajorRange);
+ l_in_col_major.setRandom();
+
+ DataType* gpu_data_l_in_col_major = static_cast<DataType*>(sycl_device.allocate(l_in_col_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>> gpu_l_in_col_major(gpu_data_l_in_col_major, tensorColMajorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_l_in_col_major, l_in_col_major.data(),(l_in_col_major.size())*sizeof(DataType));
+
+ array<IndexType, 5> patchTensorRange={{sizeDim1, 11, 11, sizeDim2*sizeDim3, sizeDim4}};
+ Tensor<DataType, 5, DataLayout,IndexType> l_out_col_major(patchTensorRange);
+ size_t patchTensorBuffSize =l_out_col_major.size()*sizeof(DataType);
+ DataType* gpu_data_l_out_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>> gpu_l_out_col_major(gpu_data_l_out_col_major, patchTensorRange);
+ gpu_l_out_col_major.device(sycl_device)=gpu_l_in_col_major.extract_image_patches(11, 11);
+ sycl_device.memcpyDeviceToHost(l_out_col_major.data(), gpu_data_l_out_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(0), sizeDim1);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(1), 11);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(2), 11);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(3), sizeDim2*sizeDim3);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(4), sizeDim4);
+
+ // RowMajor
+ patchTensorRange={{sizeDim4, sizeDim2*sizeDim3, 11, 11, sizeDim1}};
+ Tensor<DataType, 5, RowMajor,IndexType> l_out_row_major(patchTensorRange);
+ patchTensorBuffSize =l_out_row_major.size()*sizeof(DataType);
+ DataType* gpu_data_l_out_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>> gpu_l_out_row_major(gpu_data_l_out_row_major, patchTensorRange);
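+  // swap_layout() reinterprets the ColMajor input as RowMajor (reversing the
+  // dimension order), so the same device data drives the RowMajor extraction.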
+ gpu_l_out_row_major.device(sycl_device)=gpu_l_in_col_major.swap_layout().extract_image_patches(11, 11);
+ sycl_device.memcpyDeviceToHost(l_out_row_major.data(), gpu_data_l_out_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(0), sizeDim4);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(1), sizeDim2*sizeDim3);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 11);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 11);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(4), sizeDim1);
+
+ for (IndexType b = 0; b < 16; ++b) {
+ for (IndexType i = 0; i < 128; ++i) {
+ for (IndexType j = 0; j < 128; ++j) {
+        IndexType patchId = i+128*j;
+ for (IndexType c = 0; c < 11; ++c) {
+ for (IndexType r = 0; r < 11; ++r) {
+ for (IndexType d = 0; d < 3; ++d) {
+ DataType expected = 0.0f;
+ if (r-5+i >= 0 && c-5+j >= 0 && r-5+i < 128 && c-5+j < 128) {
+ expected = l_in_col_major(d, r-5+i, c-5+j, b);
+ }
+ // ColMajor
+ if (l_out_col_major(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_col_major(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (l_out_row_major(b, patchId, c, r, d) !=
+ expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j
+ << " r=" << r << " c=" << c << " d=" << d << " b=" << b
+ << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d),
+ expected);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ColMajor
+ sycl_device.deallocate(gpu_data_l_in_col_major);
+ sycl_device.deallocate(gpu_data_l_out_col_major);
+ sizeDim1 = 16;
+ sizeDim2 = 64;
+ sizeDim3 = 64;
+ sizeDim4 = 32;
+ tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ l_in_col_major.resize(tensorColMajorRange);
+ l_in_col_major.setRandom();
+ gpu_data_l_in_col_major = static_cast<DataType*>(sycl_device.allocate(l_in_col_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>>gpu_l_in_col_major_resize1(gpu_data_l_in_col_major, tensorColMajorRange);
+
+ patchTensorRange={{sizeDim1, 9, 9, sizeDim2*sizeDim3, sizeDim4}};
+ l_out_col_major.resize(patchTensorRange);
+ patchTensorBuffSize =l_out_col_major.size()*sizeof(DataType);
+ gpu_data_l_out_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>>gpu_l_out_col_major_resize1(gpu_data_l_out_col_major, patchTensorRange);
+ sycl_device.memcpyHostToDevice(gpu_data_l_in_col_major, l_in_col_major.data(),(l_in_col_major.size())*sizeof(DataType));
+ gpu_l_out_col_major_resize1.device(sycl_device)=gpu_l_in_col_major_resize1.extract_image_patches(9, 9);
+ sycl_device.memcpyDeviceToHost(l_out_col_major.data(), gpu_data_l_out_col_major, patchTensorBuffSize);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(0), 16);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(1), 9);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(2), 9);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(3), 64*64);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(4), 32);
+
+  // RowMajor
+ sycl_device.deallocate(gpu_data_l_out_row_major);
+ patchTensorRange={{sizeDim4, sizeDim2*sizeDim3, 9, 9 ,sizeDim1}};
+ l_out_row_major.resize(patchTensorRange);
+ patchTensorBuffSize =l_out_row_major.size()*sizeof(DataType);
+ gpu_data_l_out_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>>gpu_l_out_row_major_resize1(gpu_data_l_out_row_major, patchTensorRange);
+ gpu_l_out_row_major_resize1.device(sycl_device)=gpu_l_in_col_major_resize1.swap_layout().extract_image_patches(9, 9);
+ sycl_device.memcpyDeviceToHost(l_out_row_major.data(), gpu_data_l_out_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 64*64);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 9);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 9);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 16);
+
+ for (IndexType b = 0; b < 32; ++b) {
+ for (IndexType i = 0; i < 64; ++i) {
+ for (IndexType j = 0; j < 64; ++j) {
+        IndexType patchId = i+64*j;
+ for (IndexType c = 0; c < 9; ++c) {
+ for (IndexType r = 0; r < 9; ++r) {
+ for (IndexType d = 0; d < 16; ++d) {
+ DataType expected = 0.0f;
+ if (r-4+i >= 0 && c-4+j >= 0 && r-4+i < 64 && c-4+j < 64) {
+ expected = l_in_col_major(d, r-4+i, c-4+j, b);
+ }
+ // ColMajor
+ if (l_out_col_major(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_col_major(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (l_out_row_major(b, patchId, c, r, d) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ColMajor
+
+ sycl_device.deallocate(gpu_data_l_in_col_major);
+ sycl_device.deallocate(gpu_data_l_out_col_major);
+ sizeDim1 = 32;
+ sizeDim2 = 16;
+ sizeDim3 = 16;
+ sizeDim4 = 32;
+ tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ l_in_col_major.resize(tensorColMajorRange);
+ l_in_col_major.setRandom();
+ gpu_data_l_in_col_major = static_cast<DataType*>(sycl_device.allocate(l_in_col_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>>gpu_l_in_col_major_resize2(gpu_data_l_in_col_major, tensorColMajorRange);
+
+ patchTensorRange={{sizeDim1, 7, 7, sizeDim2*sizeDim3, sizeDim4}};
+ l_out_col_major.resize(patchTensorRange);
+ patchTensorBuffSize =l_out_col_major.size()*sizeof(DataType);
+ gpu_data_l_out_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>>gpu_l_out_col_major_resize2(gpu_data_l_out_col_major, patchTensorRange);
+ sycl_device.memcpyHostToDevice(gpu_data_l_in_col_major, l_in_col_major.data(),(l_in_col_major.size())*sizeof(DataType));
+ gpu_l_out_col_major_resize2.device(sycl_device)=gpu_l_in_col_major_resize2.extract_image_patches(7, 7);
+ sycl_device.memcpyDeviceToHost(l_out_col_major.data(), gpu_data_l_out_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(0), 32);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(1), 7);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(2), 7);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(3), 16*16);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(4), 32);
+
+ // RowMajor
+ sycl_device.deallocate(gpu_data_l_out_row_major);
+ patchTensorRange={{sizeDim4, sizeDim2*sizeDim3, 7, 7 ,sizeDim1}};
+ l_out_row_major.resize(patchTensorRange);
+ patchTensorBuffSize =l_out_row_major.size()*sizeof(DataType);
+ gpu_data_l_out_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>>gpu_l_out_row_major_resize2(gpu_data_l_out_row_major, patchTensorRange);
+ gpu_l_out_row_major_resize2.device(sycl_device)=gpu_l_in_col_major_resize2.swap_layout().extract_image_patches(7, 7);
+ sycl_device.memcpyDeviceToHost(l_out_row_major.data(), gpu_data_l_out_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 16*16);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 7);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 7);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 32);
+
+ for (IndexType b = 0; b < 32; ++b) {
+ for (IndexType i = 0; i < 16; ++i) {
+ for (IndexType j = 0; j < 16; ++j) {
+        IndexType patchId = i+16*j;
+ for (IndexType c = 0; c < 7; ++c) {
+ for (IndexType r = 0; r < 7; ++r) {
+ for (IndexType d = 0; d < 32; ++d) {
+ DataType expected = 0.0f;
+ if (r-3+i >= 0 && c-3+j >= 0 && r-3+i < 16 && c-3+j < 16) {
+ expected = l_in_col_major(d, r-3+i, c-3+j, b);
+ }
+ // ColMajor
+ if (l_out_col_major(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_col_major(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (l_out_row_major(b, patchId, c, r, d) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ColMajor
+ sycl_device.deallocate(gpu_data_l_in_col_major);
+ sycl_device.deallocate(gpu_data_l_out_col_major);
+ sizeDim1 = 64;
+ sizeDim2 = 13;
+ sizeDim3 = 13;
+ sizeDim4 = 32;
+ tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ l_in_col_major.resize(tensorColMajorRange);
+ l_in_col_major.setRandom();
+ gpu_data_l_in_col_major = static_cast<DataType*>(sycl_device.allocate(l_in_col_major.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 4, ColMajor, IndexType>>gpu_l_in_col_major_resize3(gpu_data_l_in_col_major, tensorColMajorRange);
+
+ patchTensorRange={{sizeDim1, 3, 3, sizeDim2*sizeDim3, sizeDim4}};
+ l_out_col_major.resize(patchTensorRange);
+ patchTensorBuffSize =l_out_col_major.size()*sizeof(DataType);
+ gpu_data_l_out_col_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, DataLayout,IndexType>>gpu_l_out_col_major_resize3(gpu_data_l_out_col_major, patchTensorRange);
+ sycl_device.memcpyHostToDevice(gpu_data_l_in_col_major, l_in_col_major.data(),(l_in_col_major.size())*sizeof(DataType));
+ gpu_l_out_col_major_resize3.device(sycl_device)=gpu_l_in_col_major_resize3.extract_image_patches(3, 3);
+ sycl_device.memcpyDeviceToHost(l_out_col_major.data(), gpu_data_l_out_col_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(0), 64);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(1), 3);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(2), 3);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(3), 13*13);
+ VERIFY_IS_EQUAL(l_out_col_major.dimension(4), 32);
+
+ // RowMajor
+ sycl_device.deallocate(gpu_data_l_out_row_major);
+ patchTensorRange={{sizeDim4, sizeDim2*sizeDim3, 3, 3 ,sizeDim1}};
+ l_out_row_major.resize(patchTensorRange);
+ patchTensorBuffSize =l_out_row_major.size()*sizeof(DataType);
+ gpu_data_l_out_row_major = static_cast<DataType*>(sycl_device.allocate(patchTensorBuffSize));
+ TensorMap<Tensor<DataType, 5, RowMajor,IndexType>>gpu_l_out_row_major_resize3(gpu_data_l_out_row_major, patchTensorRange);
+ gpu_l_out_row_major_resize3.device(sycl_device)=gpu_l_in_col_major_resize3.swap_layout().extract_image_patches(3, 3);
+ sycl_device.memcpyDeviceToHost(l_out_row_major.data(), gpu_data_l_out_row_major, patchTensorBuffSize);
+
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 13*13);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 3);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 3);
+ VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 64);
+
+ for (IndexType b = 0; b < 32; ++b) {
+ for (IndexType i = 0; i < 13; ++i) {
+ for (IndexType j = 0; j < 13; ++j) {
+        IndexType patchId = i+13*j;
+ for (IndexType c = 0; c < 3; ++c) {
+ for (IndexType r = 0; r < 3; ++r) {
+ for (IndexType d = 0; d < 64; ++d) {
+ DataType expected = 0.0f;
+ if (r-1+i >= 0 && c-1+j >= 0 && r-1+i < 13 && c-1+j < 13) {
+ expected = l_in_col_major(d, r-1+i, c-1+j, b);
+ }
+ // ColMajor
+ if (l_out_col_major(d, r, c, patchId, b) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_col_major(d, r, c, patchId, b), expected);
+ // RowMajor
+ if (l_out_row_major(b, patchId, c, r, d) != expected) {
+ std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl;
+ }
+ VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected);
+ }
+ }
+ }
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data_l_in_col_major);
+ sycl_device.deallocate(gpu_data_l_out_col_major);
+ sycl_device.deallocate(gpu_data_l_out_row_major);
+}
+
+
+template<typename DataType, typename dev_Selector> void sycl_tensor_image_patch_test_per_device(dev_Selector s){
+  QueueInterface queueInterface(s);
+  auto sycl_device = Eigen::SyclDevice(&queueInterface);
+  test_simple_image_patch_sycl<DataType, int64_t>(sycl_device);
+  test_patch_padding_valid_sycl<DataType, int64_t>(sycl_device);
+  test_patch_padding_valid_same_value_sycl<DataType, int64_t>(sycl_device);
+  test_patch_padding_same_sycl<DataType, int64_t>(sycl_device);
+  test_patch_no_extra_dim_sycl<DataType, int64_t>(sycl_device);
+  test_imagenet_patches_sycl<DataType, int64_t>(sycl_device);
+}
+void test_cxx11_tensor_image_patchOP_sycl()
+{
+  for (const auto& device : Eigen::get_sycl_supported_devices()) {
+    CALL_SUBTEST(sycl_tensor_image_patch_test_per_device<float>(device));
+  }
+}
diff --git a/unsupported/test/cxx11_tensor_inflation_sycl.cpp b/unsupported/test/cxx11_tensor_inflation_sycl.cpp
new file mode 100644
index 000000000..f2f87f7ed
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_inflation_sycl.cpp
@@ -0,0 +1,136 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_inflation_sycl
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+// Inflation definition: along each dimension, the inflated size is
+// ((dim - 1) * stride[dim] + 1).
+
+// For example, a 1-dimensional vector of size 3 with values (4, 4, 4) and an
+// inflation stride of 3 becomes a tensor of size (3 - 1) * 3 + 1 = 7 with the
+// values (4, 0, 0, 4, 0, 0, 4).
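+
+// A minimal host-side sketch of the same 1-D example (not part of the SYCL
+// test below, shown only to illustrate the expected behaviour):
+//
+//   Eigen::Tensor<float, 1> v(3);
+//   v.setValues({4.0f, 4.0f, 4.0f});
+//   Eigen::array<Eigen::DenseIndex, 1> s = {{3}};
+//   Eigen::Tensor<float, 1> out = v.inflate(s);  // out.size() == (3-1)*3+1 == 7
+//   // out is (4, 0, 0, 4, 0, 0, 4)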
+
+template <typename DataType, int DataLayout, typename IndexType>
+void test_simple_inflation_sycl(const Eigen::SyclDevice &sycl_device) {
+
+ IndexType sizeDim1 = 2;
+ IndexType sizeDim2 = 3;
+ IndexType sizeDim3 = 5;
+ IndexType sizeDim4 = 7;
+ array<IndexType, 4> tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
+ Tensor<DataType, 4, DataLayout,IndexType> tensor(tensorRange);
+ Tensor<DataType, 4, DataLayout,IndexType> no_stride(tensorRange);
+ tensor.setRandom();
+
+ array<IndexType, 4> strides;
+ strides[0] = 1;
+ strides[1] = 1;
+ strides[2] = 1;
+ strides[3] = 1;
+
+ const size_t tensorBuffSize =tensor.size()*sizeof(DataType);
+ DataType* gpu_data_tensor = static_cast<DataType*>(sycl_device.allocate(tensorBuffSize));
+ DataType* gpu_data_no_stride = static_cast<DataType*>(sycl_device.allocate(tensorBuffSize));
+
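+  // TensorMap wraps the raw device allocations so that tensor expressions can
+  // be evaluated on the SYCL device via .device(sycl_device).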
+ TensorMap<Tensor<DataType, 4, DataLayout,IndexType>> gpu_tensor(gpu_data_tensor, tensorRange);
+ TensorMap<Tensor<DataType, 4, DataLayout,IndexType>> gpu_no_stride(gpu_data_no_stride, tensorRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data_tensor, tensor.data(), tensorBuffSize);
+ gpu_no_stride.device(sycl_device)=gpu_tensor.inflate(strides);
+ sycl_device.memcpyDeviceToHost(no_stride.data(), gpu_data_no_stride, tensorBuffSize);
+
+ VERIFY_IS_EQUAL(no_stride.dimension(0), sizeDim1);
+ VERIFY_IS_EQUAL(no_stride.dimension(1), sizeDim2);
+ VERIFY_IS_EQUAL(no_stride.dimension(2), sizeDim3);
+ VERIFY_IS_EQUAL(no_stride.dimension(3), sizeDim4);
+
+ for (IndexType i = 0; i < 2; ++i) {
+ for (IndexType j = 0; j < 3; ++j) {
+ for (IndexType k = 0; k < 5; ++k) {
+ for (IndexType l = 0; l < 7; ++l) {
+ VERIFY_IS_EQUAL(tensor(i,j,k,l), no_stride(i,j,k,l));
+ }
+ }
+ }
+ }
+
+ strides[0] = 2;
+ strides[1] = 4;
+ strides[2] = 2;
+ strides[3] = 3;
+
+ IndexType inflatedSizeDim1 = 3;
+ IndexType inflatedSizeDim2 = 9;
+ IndexType inflatedSizeDim3 = 9;
+ IndexType inflatedSizeDim4 = 19;
+ array<IndexType, 4> inflatedTensorRange = {{inflatedSizeDim1, inflatedSizeDim2, inflatedSizeDim3, inflatedSizeDim4}};
+
+ Tensor<DataType, 4, DataLayout, IndexType> inflated(inflatedTensorRange);
+
+ const size_t inflatedTensorBuffSize =inflated.size()*sizeof(DataType);
+ DataType* gpu_data_inflated = static_cast<DataType*>(sycl_device.allocate(inflatedTensorBuffSize));
+ TensorMap<Tensor<DataType, 4, DataLayout, IndexType>> gpu_inflated(gpu_data_inflated, inflatedTensorRange);
+ gpu_inflated.device(sycl_device)=gpu_tensor.inflate(strides);
+ sycl_device.memcpyDeviceToHost(inflated.data(), gpu_data_inflated, inflatedTensorBuffSize);
+
+ VERIFY_IS_EQUAL(inflated.dimension(0), inflatedSizeDim1);
+ VERIFY_IS_EQUAL(inflated.dimension(1), inflatedSizeDim2);
+ VERIFY_IS_EQUAL(inflated.dimension(2), inflatedSizeDim3);
+ VERIFY_IS_EQUAL(inflated.dimension(3), inflatedSizeDim4);
+
+ for (IndexType i = 0; i < inflatedSizeDim1; ++i) {
+ for (IndexType j = 0; j < inflatedSizeDim2; ++j) {
+ for (IndexType k = 0; k < inflatedSizeDim3; ++k) {
+ for (IndexType l = 0; l < inflatedSizeDim4; ++l) {
+ if (i % strides[0] == 0 &&
+ j % strides[1] == 0 &&
+ k % strides[2] == 0 &&
+ l % strides[3] == 0) {
+ VERIFY_IS_EQUAL(inflated(i,j,k,l),
+ tensor(i/strides[0], j/strides[1], k/strides[2], l/strides[3]));
+ } else {
+ VERIFY_IS_EQUAL(0, inflated(i,j,k,l));
+ }
+ }
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data_tensor);
+ sycl_device.deallocate(gpu_data_no_stride);
+ sycl_device.deallocate(gpu_data_inflated);
+}
+
+template<typename DataType, typename dev_Selector> void sycl_inflation_test_per_device(dev_Selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_simple_inflation_sycl<DataType, RowMajor, int64_t>(sycl_device);
+ test_simple_inflation_sycl<DataType, ColMajor, int64_t>(sycl_device);
+}
+void test_cxx11_tensor_inflation_sycl()
+{
+  for (const auto& device : Eigen::get_sycl_supported_devices()) {
+ CALL_SUBTEST(sycl_inflation_test_per_device<float>(device));
+ }
+}