author    Mehdi Goli <mehdi.goli@codeplay.com>  2017-02-15 16:28:12 +0000
committer Mehdi Goli <mehdi.goli@codeplay.com>  2017-02-15 16:28:12 +0000
commit    91982b91c02deb5e1ce557bbc5c96fee19c636ed
tree      f2face079780ae5b385d2fbb63a308ec12bb1051
parent    b1e312edd607bcfa99192d53f55b2ac974644c44
Adding TensorLayoutSwapOp for sycl.
 unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h |  29
 unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h           |  21
 unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h           |  15
 unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h           |  29
 unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h                 |  14
 unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h           |  17
 unsupported/test/CMakeLists.txt                                          |   1
 unsupported/test/cxx11_tensor_layout_swap_sycl.cpp                       | 126
 unsupported/test/cxx11_tensor_patch_sycl.cpp                             |   9
 9 files changed, 217 insertions(+), 44 deletions(-)
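
Taken together, the changes below thread TensorLayoutSwapOp through each stage of Eigen's SYCL expression machinery (device-expression conversion, expression construction, accessor and functor extraction, leaf counting and placeholder expressions) and add a dedicated test. Distilled from that test, a minimal sketch of what this enables (assuming a working Eigen SYCL backend and at least one supported device; the function name layout_swap_sketch is illustrative) looks roughly like:

#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
#include <cstdint>
#include <Eigen/CXX11/Tensor>

// Illustrative sketch only: copy a col-major tensor to the device, swap its
// layout on the SYCL device, and read the row-major result back.
static void layout_swap_sketch() {
  auto devices = Eigen::get_sycl_supported_devices();
  Eigen::QueueInterface queueInterface(devices[0]);
  Eigen::SyclDevice sycl_device(&queueInterface);

  Eigen::Tensor<float, 3, Eigen::ColMajor, int64_t> in(2, 3, 7);
  Eigen::Tensor<float, 3, Eigen::RowMajor, int64_t> out(7, 3, 2);
  in.setRandom();

  float* d_in  = static_cast<float*>(sycl_device.allocate(in.size() * sizeof(float)));
  float* d_out = static_cast<float*>(sycl_device.allocate(out.size() * sizeof(float)));
  Eigen::TensorMap<Eigen::Tensor<float, 3, Eigen::ColMajor, int64_t>> gpu_in(d_in, 2, 3, 7);
  Eigen::TensorMap<Eigen::Tensor<float, 3, Eigen::RowMajor, int64_t>> gpu_out(d_out, 7, 3, 2);

  sycl_device.memcpyHostToDevice(d_in, in.data(), in.size() * sizeof(float));
  gpu_out.device(sycl_device) = gpu_in.swap_layout();   // the expression this patch enables on SYCL
  sycl_device.memcpyDeviceToHost(out.data(), d_out, out.size() * sizeof(float));

  sycl_device.deallocate(d_in);
  sycl_device.deallocate(d_out);
}
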
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
index ee8f3c9c2..ff5097141 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
@@ -91,27 +91,34 @@ ASSIGNCONVERT(, false)
#undef ASSIGNCONVERT
/// specialisation of the \ref ConvertToDeviceExpression struct when the node
-/// type is either TensorForcedEvalOp or TensorEvalToOp
+/// type is TensorEvalToOp
#define KERNELBROKERCONVERT(CVQual, Res, ExprNode)\
template <typename Expr>\
struct ConvertToDeviceExpression<CVQual ExprNode<Expr> > \
: DeviceConvertor<ExprNode, Res, Expr>{};
-/// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorForcedEvalOp
-#define KERNELBROKERCONVERTFORCEDEVAL(CVQual)\
+
+KERNELBROKERCONVERT(const, true, TensorEvalToOp)
+KERNELBROKERCONVERT(, false, TensorEvalToOp)
+#undef KERNELBROKERCONVERT
+
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node types are TensorForcedEvalOp and TensorLayoutSwapOp
+#define KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAP(CVQual, ExprNode)\
template <typename Expr>\
-struct ConvertToDeviceExpression<CVQual TensorForcedEvalOp<Expr> > {\
- typedef CVQual TensorForcedEvalOp< typename ConvertToDeviceExpression<Expr>::Type> Type;\
+struct ConvertToDeviceExpression<CVQual ExprNode<Expr> > {\
+ typedef CVQual ExprNode< typename ConvertToDeviceExpression<Expr>::Type> Type;\
};
-KERNELBROKERCONVERTFORCEDEVAL(const)
-KERNELBROKERCONVERTFORCEDEVAL()
-#undef KERNELBROKERCONVERTFORCEDEVAL
+// TensorForcedEvalOp
+KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAP(const,TensorForcedEvalOp)
+KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAP(,TensorForcedEvalOp)
+
+// TensorLayoutSwapOp
+KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAP(const,TensorLayoutSwapOp)
+KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAP(,TensorLayoutSwapOp)
+#undef KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAP
-KERNELBROKERCONVERT(const, true, TensorEvalToOp)
-KERNELBROKERCONVERT(, false, TensorEvalToOp)
-#undef KERNELBROKERCONVERT
/// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorReductionOp
#define KERNELBROKERCONVERTREDUCTION(CVQual)\
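
For readers unfamiliar with these helper macros, the const instantiation KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAP(const, TensorLayoutSwapOp) above expands to roughly the following: the layout-swap node itself is preserved and only its nested expression is converted to its device-side form.

// Approximate expansion of KERNELBROKERCONVERTFORCEDEVALLAYOUTSWAP(const, TensorLayoutSwapOp)
template <typename Expr>
struct ConvertToDeviceExpression<const TensorLayoutSwapOp<Expr> > {
  typedef const TensorLayoutSwapOp<typename ConvertToDeviceExpression<Expr>::Type> Type;
};
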
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
index 3b83b1d2c..6b6093fa3 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
@@ -223,7 +223,7 @@ struct ExprConstructor<CVQual TensorEvalToOp<OrigExpr, MakeGlobalPointer>, CVQua
Type expr;\
template <typename FuncDetector>\
ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)\
- : nestedExpression(funcD.rhsExpr, t), buffer(t), expr(buffer.expr, nestedExpression.expr) {}\
+ : nestedExpression(funcD.xprExpr, t), buffer(t), expr(buffer.expr, nestedExpression.expr) {}\
};
EVALTO(const)
@@ -386,6 +386,25 @@ SYCLTENSORCHIPPINGOPEXPR()
#undef SYCLTENSORCHIPPINGOPEXPR
+
+// TensorLayoutSwapOp
+#define SYCLTENSORLAYOUTSWAPOPEXPR(CVQual)\
+template<typename OrigXprType, typename XprType, typename... Params>\
+struct ExprConstructor<CVQual TensorLayoutSwapOp <OrigXprType> , CVQual TensorLayoutSwapOp<XprType>, Params... >{\
+ typedef ExprConstructor<OrigXprType, XprType, Params...> my_xpr_type;\
+ typedef CVQual TensorLayoutSwapOp<typename my_xpr_type::Type> Type;\
+ my_xpr_type xprExpr;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)\
+ : xprExpr(funcD.xprExpr, t), expr(xprExpr.expr) {}\
+};
+
+SYCLTENSORLAYOUTSWAPOPEXPR(const)
+SYCLTENSORLAYOUTSWAPOPEXPR()
+#undef SYCLTENSORLAYOUTSWAPOPEXPR
+
+
/// template deduction for \ref ExprConstructor struct
template <typename OrigExpr, typename IndexExpr, typename FuncD, typename... Params>
auto createDeviceExpression(FuncD &funcD, const utility::tuple::Tuple<Params...> &t)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
index b512d43f6..213dd25ea 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
@@ -226,6 +226,21 @@ SYCLTENSORCHIPPINGOPEXTACC()
#undef SYCLTENSORCHIPPINGOPEXTACC
+/// specialisation of the \ref ExtractAccessor struct when the node type is
+/// TensorLayoutSwapOp.
+#define SYCLTENSORLAYOUTSWAPOPEXTACC(CVQual)\
+template<typename XprType, typename Dev>\
+struct ExtractAccessor<TensorEvaluator<CVQual TensorLayoutSwapOp<XprType>, Dev> >{\
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<CVQual TensorLayoutSwapOp<XprType>, Dev>& eval)\
+ RETURN_CPP11(AccessorConstructor::getTuple(cgh, eval.impl()))\
+};
+
+SYCLTENSORLAYOUTSWAPOPEXTACC(const)
+SYCLTENSORLAYOUTSWAPOPEXTACC()
+#undef SYCLTENSORLAYOUTSWAPOPEXTACC
+
+
+
/// template deduction for \ref ExtractAccessor
template <typename Evaluator>
auto createTupleOfAccessors(cl::sycl::handler& cgh, const Evaluator& eval)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
index ee020184b..1506e8189 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
@@ -39,7 +39,6 @@ template <typename Evaluator> struct FunctorExtractor{
EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
FunctorExtractor(const Evaluator& expr)
: m_dimensions(expr.dimensions()) {}
-
};
/// specialisation of the \ref FunctorExtractor struct when the node type does not require anything
@@ -143,19 +142,23 @@ SYCLEXTRFUNCASSIGNOP(const)
SYCLEXTRFUNCASSIGNOP()
#undef SYCLEXTRFUNCASSIGNOP
-/// specialisation of the \ref FunctorExtractor struct when the node type is
-/// TensorEvalToOp, This is an specialisation without OP so it has to be separated.
-#define SYCLEXTRFUNCEVALTOOP(CVQual)\
-template <typename RHSExpr, typename Dev>\
-struct FunctorExtractor<TensorEvaluator<CVQual TensorEvalToOp<RHSExpr>, Dev> > {\
- FunctorExtractor<TensorEvaluator<RHSExpr, Dev> > rhsExpr;\
- FunctorExtractor(const TensorEvaluator<CVQual TensorEvalToOp<RHSExpr>, Dev>& expr)\
- : rhsExpr(expr.impl()) {}\
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// TensorEvalToOp or TensorLayoutSwapOp. This is a specialisation without an OP, so it has to be separated.
+#define SYCLEXTRFUNCEVALTOOPSWAPLAYOUT(CVQual, ExprNode)\
+template <typename Expr, typename Dev>\
+struct FunctorExtractor<TensorEvaluator<CVQual ExprNode<Expr>, Dev> > {\
+ FunctorExtractor<TensorEvaluator<Expr, Dev> > xprExpr;\
+ FunctorExtractor(const TensorEvaluator<CVQual ExprNode<Expr>, Dev>& expr)\
+ : xprExpr(expr.impl()) {}\
};
-
-SYCLEXTRFUNCEVALTOOP(const)
-SYCLEXTRFUNCEVALTOOP()
-#undef SYCLEXTRFUNCEVALTOOP
+//TensorEvalToOp
+SYCLEXTRFUNCEVALTOOPSWAPLAYOUT(const, TensorEvalToOp)
+SYCLEXTRFUNCEVALTOOPSWAPLAYOUT(, TensorEvalToOp)
+// TensorLayoutSwapOp
+SYCLEXTRFUNCEVALTOOPSWAPLAYOUT(const, TensorLayoutSwapOp)
+SYCLEXTRFUNCEVALTOOPSWAPLAYOUT(, TensorLayoutSwapOp)
+
+#undef SYCLEXTRFUNCEVALTOOPSWAPLAYOUT
template<typename Dim, size_t NumOutputDim> struct DimConstr {
template<typename InDim>
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
index a1c112f4d..15729310d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
@@ -94,15 +94,17 @@ SYCLFORCEDEVALLEAFCOUNT()
#undef SYCLFORCEDEVALLEAFCOUNT
/// specialisation of the \ref LeafCount struct when the node type is TensorEvalToOp
-#define EVALTOLEAFCOUNT(CVQual)\
+#define EVALTOLAYOUTSWAPLEAFCOUNT(CVQual , ExprNode, Num)\
template <typename Expr>\
-struct LeafCount<CVQual TensorEvalToOp<Expr> > {\
- static const size_t Count = 1 + CategoryCount<Expr>::Count;\
+struct LeafCount<CVQual ExprNode<Expr> > {\
+ static const size_t Count = Num + CategoryCount<Expr>::Count;\
};
-EVALTOLEAFCOUNT(const)
-EVALTOLEAFCOUNT()
-#undef EVALTOLEAFCOUNT
+EVALTOLAYOUTSWAPLEAFCOUNT(const, TensorEvalToOp, 1)
+EVALTOLAYOUTSWAPLEAFCOUNT(, TensorEvalToOp, 1)
+EVALTOLAYOUTSWAPLEAFCOUNT(const, TensorLayoutSwapOp, 0)
+EVALTOLAYOUTSWAPLEAFCOUNT(, TensorLayoutSwapOp, 0)
+#undef EVALTOLAYOUTSWAPLEAFCOUNT
/// specialisation of the \ref LeafCount struct when the node type is const TensorReductionOp
#define REDUCTIONLEAFCOUNT(CVQual)\
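
The Num parameter is the only difference between the two node types handled by this macro: TensorEvalToOp keeps its count of 1 (presumably accounting for the extra device buffer it carries as a leaf), while TensorLayoutSwapOp contributes 0 and only forwards the leaf count of its nested expression. Expanded for the const layout-swap case, the macro amounts to roughly:

// Approximate expansion of EVALTOLAYOUTSWAPLEAFCOUNT(const, TensorLayoutSwapOp, 0):
// the layout-swap node adds no leaf of its own.
template <typename Expr>
struct LeafCount<const TensorLayoutSwapOp<Expr> > {
  static const size_t Count = 0 + CategoryCount<Expr>::Count;
};
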
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
index 74566dcee..ba0d17e0c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
@@ -144,16 +144,19 @@ FORCEDEVAL()
#undef FORCEDEVAL
/// specialisation of the \ref PlaceHolderExpression when the node is
-/// TensorEvalToOp
-#define EVALTO(CVQual)\
+/// TensorEvalToOp, TensorLayoutSwapOp
+#define EVALTOLAYOUTSWAP(CVQual, ExprNode)\
template <typename Expr, size_t N>\
-struct PlaceHolderExpression<CVQual TensorEvalToOp<Expr>, N> {\
- typedef CVQual TensorEvalToOp<typename CalculateIndex <N, Expr>::ArgType> Type;\
+struct PlaceHolderExpression<CVQual ExprNode<Expr>, N> {\
+ typedef CVQual ExprNode<typename CalculateIndex <N, Expr>::ArgType> Type;\
};
-EVALTO(const)
-EVALTO()
-#undef EVALTO
+EVALTOLAYOUTSWAP(const, TensorEvalToOp)
+EVALTOLAYOUTSWAP(, TensorEvalToOp)
+EVALTOLAYOUTSWAP(const, TensorLayoutSwapOp)
+EVALTOLAYOUTSWAP(, TensorLayoutSwapOp)
+
+#undef EVALTOLAYOUTSWAP
/// specialisation of the \ref PlaceHolderExpression when the node is
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index d01233fb2..57580f805 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -168,6 +168,7 @@ if(EIGEN_TEST_CXX11)
ei_add_test_sycl(cxx11_tensor_striding_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_chipping_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_patch_sycl "-std=c++11")
+ ei_add_test_sycl(cxx11_tensor_layout_swap_sycl "-std=c++11")
endif(EIGEN_TEST_SYCL)
# It should be safe to always run these tests as there is some fallback code for
# older compiler that don't support cxx11.
diff --git a/unsupported/test/cxx11_tensor_layout_swap_sycl.cpp b/unsupported/test/cxx11_tensor_layout_swap_sycl.cpp
new file mode 100644
index 000000000..9e8db8b4b
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_layout_swap_sycl.cpp
@@ -0,0 +1,126 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+// Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_layout_swap_sycl
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template <typename DataType, typename IndexType>
+static void test_simple_swap_sycl(const Eigen::SyclDevice& sycl_device)
+{
+ IndexType sizeDim1 = 2;
+ IndexType sizeDim2 = 3;
+ IndexType sizeDim3 = 7;
+ array<IndexType, 3> tensorColRange = {{sizeDim1, sizeDim2, sizeDim3}};
+ array<IndexType, 3> tensorRowRange = {{sizeDim3, sizeDim2, sizeDim1}};
+
+
+ Tensor<DataType, 3, ColMajor, IndexType> tensor1(tensorColRange);
+ Tensor<DataType, 3, RowMajor, IndexType> tensor2(tensorRowRange);
+ tensor1.setRandom();
+
+ DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(tensor1.size()*sizeof(DataType)));
+ DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(tensor2.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 3, ColMajor, IndexType>> gpu1(gpu_data1, tensorColRange);
+ TensorMap<Tensor<DataType, 3, RowMajor, IndexType>> gpu2(gpu_data2, tensorRowRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data1, tensor1.data(),(tensor1.size())*sizeof(DataType));
+ gpu2.device(sycl_device)=gpu1.swap_layout();
+ sycl_device.memcpyDeviceToHost(tensor2.data(), gpu_data2,(tensor2.size())*sizeof(DataType));
+
+
+// Tensor<float, 3, ColMajor> tensor(2,3,7);
+ //tensor.setRandom();
+
+// Tensor<float, 3, RowMajor> tensor2 = tensor.swap_layout();
+ VERIFY_IS_EQUAL(tensor1.dimension(0), tensor2.dimension(2));
+ VERIFY_IS_EQUAL(tensor1.dimension(1), tensor2.dimension(1));
+ VERIFY_IS_EQUAL(tensor1.dimension(2), tensor2.dimension(0));
+
+ for (IndexType i = 0; i < 2; ++i) {
+ for (IndexType j = 0; j < 3; ++j) {
+ for (IndexType k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(tensor1(i,j,k), tensor2(k,j,i));
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data1);
+ sycl_device.deallocate(gpu_data2);
+}
+
+template <typename DataType, typename IndexType>
+static void test_swap_as_lvalue_sycl(const Eigen::SyclDevice& sycl_device)
+{
+
+ IndexType sizeDim1 = 2;
+ IndexType sizeDim2 = 3;
+ IndexType sizeDim3 = 7;
+ array<IndexType, 3> tensorColRange = {{sizeDim1, sizeDim2, sizeDim3}};
+ array<IndexType, 3> tensorRowRange = {{sizeDim3, sizeDim2, sizeDim1}};
+
+ Tensor<DataType, 3, ColMajor, IndexType> tensor1(tensorColRange);
+ Tensor<DataType, 3, RowMajor, IndexType> tensor2(tensorRowRange);
+ tensor1.setRandom();
+
+ DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(tensor1.size()*sizeof(DataType)));
+ DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(tensor2.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 3, ColMajor, IndexType>> gpu1(gpu_data1, tensorColRange);
+ TensorMap<Tensor<DataType, 3, RowMajor, IndexType>> gpu2(gpu_data2, tensorRowRange);
+
+ sycl_device.memcpyHostToDevice(gpu_data1, tensor1.data(),(tensor1.size())*sizeof(DataType));
+ gpu2.swap_layout().device(sycl_device)=gpu1;
+ sycl_device.memcpyDeviceToHost(tensor2.data(), gpu_data2,(tensor2.size())*sizeof(DataType));
+
+
+// Tensor<float, 3, ColMajor> tensor(2,3,7);
+// tensor.setRandom();
+
+ //Tensor<float, 3, RowMajor> tensor2(7,3,2);
+// tensor2.swap_layout() = tensor;
+ VERIFY_IS_EQUAL(tensor1.dimension(0), tensor2.dimension(2));
+ VERIFY_IS_EQUAL(tensor1.dimension(1), tensor2.dimension(1));
+ VERIFY_IS_EQUAL(tensor1.dimension(2), tensor2.dimension(0));
+
+ for (IndexType i = 0; i < 2; ++i) {
+ for (IndexType j = 0; j < 3; ++j) {
+ for (IndexType k = 0; k < 7; ++k) {
+ VERIFY_IS_EQUAL(tensor1(i,j,k), tensor2(k,j,i));
+ }
+ }
+ }
+ sycl_device.deallocate(gpu_data1);
+ sycl_device.deallocate(gpu_data2);
+}
+
+
+template<typename DataType, typename dev_Selector> void sycl_tensor_layout_swap_test_per_device(dev_Selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_simple_swap_sycl<DataType, int64_t>(sycl_device);
+ test_swap_as_lvalue_sycl<DataType, int64_t>(sycl_device);
+}
+void test_cxx11_tensor_layout_swap_sycl()
+{
+ for (const auto& device :Eigen::get_sycl_supported_devices()) {
+ CALL_SUBTEST(sycl_tensor_layout_swap_test_per_device<float>(device));
+ }
+}
diff --git a/unsupported/test/cxx11_tensor_patch_sycl.cpp b/unsupported/test/cxx11_tensor_patch_sycl.cpp
index b75219a5b..88a29cb31 100644
--- a/unsupported/test/cxx11_tensor_patch_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_patch_sycl.cpp
@@ -12,7 +12,6 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_patch_sycl
@@ -80,10 +79,12 @@ static void test_simple_patch_sycl(const Eigen::SyclDevice& sycl_device){
for (int i = 0; i < tensor.size(); ++i) {
VERIFY_IS_EQUAL(tensor.data()[i], no_patch.data()[i]);
}
+
patch_dims[0] = 2;
patch_dims[1] = 3;
patch_dims[2] = 5;
patch_dims[3] = 7;
+
if (DataLayout == ColMajor) {
patchTensorRange = {{sizeDim1,sizeDim2,sizeDim3,sizeDim4,1}};
}else{
@@ -114,15 +115,11 @@ static void test_simple_patch_sycl(const Eigen::SyclDevice& sycl_device){
for (int i = 0; i < tensor.size(); ++i) {
VERIFY_IS_EQUAL(tensor.data()[i], single_patch.data()[i]);
}
-
-
-
-
-
patch_dims[0] = 1;
patch_dims[1] = 2;
patch_dims[2] = 2;
patch_dims[3] = 1;
+
if (DataLayout == ColMajor) {
patchTensorRange = {{1,2,2,1,2*2*4*7}};
}else{