-rw-r--r--  Eigen/Core                                                                |  10
-rw-r--r--  unsupported/Eigen/CXX11/Tensor                                            |  15
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h                     |  49
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h                         |   4
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h            |   2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h                      |  81
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h                  | 242
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h                           |  17
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h  |  12
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h            |  40
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h            |  73
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h            |  23
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h                  |  41
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolder.h                |  99
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h            |  31
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h                        |  17
-rw-r--r--  unsupported/test/CMakeLists.txt                                           |   9
-rw-r--r--  unsupported/test/cxx11_tensor_reduction_sycl.cpp                          | 147
18 files changed, 663 insertions, 249 deletions
diff --git a/Eigen/Core b/Eigen/Core
index 2d2616254..8ce3d4d06 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -14,16 +14,6 @@
// first thing Eigen does: stop the compiler from committing suicide
#include "src/Core/util/DisableStupidWarnings.h"
-/// This will no longer be needed after the next release of the computecppCE
-#ifdef EIGEN_USE_SYCL
-#undef min
-#undef max
-#undef isnan
-#undef isinf
-#undef isfinite
-#include <SYCL/sycl.hpp>
-#endif
-
// Handle NVCC/CUDA/SYCL
#if defined(__CUDACC__) || defined(__SYCL_DEVICE_ONLY__)
// Do not try asserts on CUDA and SYCL!
diff --git a/unsupported/Eigen/CXX11/Tensor b/unsupported/Eigen/CXX11/Tensor
index 388976d2e..1cf19d6c1 100644
--- a/unsupported/Eigen/CXX11/Tensor
+++ b/unsupported/Eigen/CXX11/Tensor
@@ -13,6 +13,15 @@
#include "../../../Eigen/Core"
+#ifdef EIGEN_USE_SYCL
+#undef min
+#undef max
+#undef isnan
+#undef isinf
+#undef isfinite
+#include <SYCL/sycl.hpp>
+#endif
+
#include <Eigen/src/Core/util/DisableStupidWarnings.h>
#include "../SpecialFunctions"
@@ -69,10 +78,6 @@ typedef unsigned __int64 uint64_t;
#endif
#endif
-#ifdef EIGEN_USE_SYCL
-#include <SYCL/sycl.hpp>
-#endif
-
#include "src/Tensor/TensorMacros.h"
#include "src/Tensor/TensorForwardDeclarations.h"
#include "src/Tensor/TensorMeta.h"
@@ -81,7 +86,6 @@ typedef unsigned __int64 uint64_t;
#include "src/Tensor/TensorDeviceDefault.h"
#include "src/Tensor/TensorDeviceThreadPool.h"
#include "src/Tensor/TensorDeviceCuda.h"
-#include "src/Tensor/TensorSycl.h"
#include "src/Tensor/TensorDeviceSycl.h"
#include "src/Tensor/TensorIndexList.h"
#include "src/Tensor/TensorDimensionList.h"
@@ -128,6 +132,7 @@ typedef unsigned __int64 uint64_t;
#include "src/Tensor/TensorAssign.h"
#include "src/Tensor/TensorScan.h"
+#include "src/Tensor/TensorSycl.h"
#include "src/Tensor/TensorExecutor.h"
#include "src/Tensor/TensorDevice.h"
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
index bfd36f5aa..4231a11ff 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
@@ -1,12 +1,11 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Mehdi Goli Codeplay Software Ltd.
// Ralph Potter Codeplay Software Ltd.
// Luke Iwanski Codeplay Software Ltd.
-// Cummins Chris PhD student at The University of Edinburgh.
// Contact: <eigen@codeplay.com>
+// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
@@ -25,12 +24,8 @@ namespace Eigen {
template <typename T, bool MapAllocator>
struct BufferT {
using Type = cl::sycl::buffer<T, 1, cl::sycl::map_allocator<T>>;
- static inline void add_sycl_buffer(
- const T *ptr, size_t num_bytes,
- std::map<const void *, std::shared_ptr<void>> &buffer_map) {
- buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(
- ptr, std::shared_ptr<void>(std::make_shared<Type>(
- Type(const_cast<T *>(ptr), cl::sycl::range<1>(num_bytes))))));
+ static inline void add_sycl_buffer(const T *ptr, size_t num_bytes,std::map<const void *, std::shared_ptr<void>> &buffer_map) {
+ buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(std::make_shared<Type>(Type(const_cast<T *>(ptr), cl::sycl::range<1>(num_bytes))))));
}
};
@@ -39,12 +34,8 @@ struct BufferT {
template <typename T>
struct BufferT<T, false> {
using Type = cl::sycl::buffer<T, 1>;
- static inline void add_sycl_buffer(
- const T *ptr, size_t num_bytes,
- std::map<const void *, std::shared_ptr<void>> &buffer_map) {
- buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(
- ptr, std::shared_ptr<void>(
- std::make_shared<Type>(Type(cl::sycl::range<1>(num_bytes))))));
+ static inline void add_sycl_buffer(const T *ptr, size_t num_bytes, std::map<const void *, std::shared_ptr<void>> &buffer_map) {
+ buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(std::make_shared<Type>(Type(cl::sycl::range<1>(num_bytes))))));
}
};
@@ -78,15 +69,20 @@ struct SyclDevice {
/// for that particular pointer.
template <cl::sycl::access::mode AcMd, bool MapAllocator, typename T>
inline cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
- get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh,
- const T *ptr) const {
+ get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const T * ptr) const {
+ return (get_sycl_buffer<MapAllocator,T>(num_bytes, ptr).template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
+ }
+
+ template <bool MapAllocator, typename T>
+ inline typename BufferT<T, MapAllocator>::Type
+ get_sycl_buffer(size_t num_bytes, const T *ptr) const {
+ if (MapAllocator && !ptr) {
+ eigen_assert(false && "pointer with map_allocator cannot be null. Please initialise the input pointer"); }
auto it = buffer_map.find(ptr);
if (it == buffer_map.end()) {
BufferT<T, MapAllocator>::add_sycl_buffer(ptr, num_bytes, buffer_map);
}
- return (
- ((typename BufferT<T, MapAllocator>::Type *)(buffer_map.at(ptr).get()))
- ->template get_access<AcMd>(cgh));
+ return (*((typename BufferT<T, MapAllocator>::Type*)((buffer_map.at(ptr).get()))));
}
/// allocating memory on the cpu
@@ -100,22 +96,21 @@ struct SyclDevice {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void *buffer) const {
internal::aligned_free(buffer);
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src,
- size_t n) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
::memcpy(dst, src, n);
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(
- void *dst, const void *src, size_t n) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(void *dst, const void *src, size_t n) const {
memcpy(dst, src, n);
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(
- void *dst, const void *src, size_t n) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(void *dst, const void *src, size_t n) const {
memcpy(dst, src, n);
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c,
- size_t n) const {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, size_t n) const {
::memset(buffer, c, n);
}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
+ return 1;
+ }
};
} // end namespace Eigen
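
The buffer_map above memoises one SYCL buffer per host pointer. A hedged sketch of how a command group obtains an accessor through this cache; `dev`, `host_ptr` and `n` are illustrative assumptions, and only members visible in the hunk above are used:

    // `dev` is an Eigen::SyclDevice; `host_ptr` points to at least `n`
    // floats of initialised host memory (both are assumptions).
    dev.m_queue.submit([&](cl::sycl::handler &cgh) {
      // The first use of `host_ptr` inserts a map_allocator-backed buffer
      // into buffer_map; later uses with the same pointer hit the cache.
      auto acc = dev.get_sycl_accessor<cl::sycl::access::mode::read_write,
                                       /*MapAllocator=*/true, float>(n, cgh, host_ptr);
      cgh.single_task<class touch_first_element>([=]() { acc[0] += 1.0f; });
    });
    dev.m_queue.wait_and_throw();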
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
index 68d14a7e5..06987132b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
@@ -47,13 +47,13 @@ struct traits<TensorEvalToOp<XprType, MakePointer_> >
template<typename XprType, template <class> class MakePointer_>
struct eval<TensorEvalToOp<XprType, MakePointer_>, Eigen::Dense>
{
- typedef const TensorEvalToOp<XprType>& type;
+ typedef const TensorEvalToOp<XprType, MakePointer_>& type;
};
template<typename XprType, template <class> class MakePointer_>
struct nested<TensorEvalToOp<XprType, MakePointer_>, 1, typename eval<TensorEvalToOp<XprType, MakePointer_> >::type>
{
- typedef TensorEvalToOp<XprType> type;
+ typedef TensorEvalToOp<XprType, MakePointer_> type;
};
} // end namespace internal
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
index 6497b1830..52b803d7f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
@@ -33,7 +33,7 @@ template<typename UnaryOp, typename XprType> class TensorCwiseUnaryOp;
template<typename BinaryOp, typename LeftXprType, typename RightXprType> class TensorCwiseBinaryOp;
template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType> class TensorCwiseTernaryOp;
template<typename IfXprType, typename ThenXprType, typename ElseXprType> class TensorSelectOp;
-template<typename Op, typename Dims, typename XprType> class TensorReductionOp;
+template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_ = MakePointer > class TensorReductionOp;
template<typename XprType> class TensorIndexTupleOp;
template<typename ReduceOp, typename Dims, typename XprType> class TensorTupleReducerOp;
template<typename Axis, typename LeftXprType, typename RightXprType> class TensorConcatenationOp;
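
The forward declaration now carries a MakePointer_ template-template parameter (defaulting to MakePointer), so the same expression type can hold either raw host pointers or device address-space pointers such as MakeGlobalPointer. A self-contained sketch of the pattern; apart from MakePointer itself, all names are illustrative:

    #include <cstdio>

    // Default pointer maker: a plain host pointer (mirrors Eigen's MakePointer).
    template <class T> struct MakePointer { typedef T* Type; };

    // Stand-in for MakeGlobalPointer, which would map T to a SYCL
    // global_ptr pointer type instead.
    template <class T> struct MakeDevicePointer { typedef T* Type; };

    // Expression parameterised on the pointer maker, mirroring
    // TensorReductionOp<Op, Dims, XprType, MakePointer_>.
    template <class T, template <class> class MakePointer_ = MakePointer>
    struct Holder {
      typename MakePointer_<T>::Type data;
    };

    int main() {
      Holder<float> host_side{nullptr};                      // float*
      Holder<float, MakeDevicePointer> device_side{nullptr}; // device pointer type
      std::printf("%p %p\n", (void*)host_side.data, (void*)device_side.data);
      return 0;
    }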
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index d34ff98b0..367bccf63 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+// Copyright (C) 2016 Mehdi Goli, Codeplay Software Ltd <eigen@codeplay.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -20,8 +21,8 @@ namespace Eigen {
*/
namespace internal {
-template<typename Op, typename Dims, typename XprType>
-struct traits<TensorReductionOp<Op, Dims, XprType> >
+template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
+struct traits<TensorReductionOp<Op, Dims, XprType, MakePointer_> >
: traits<XprType>
{
typedef traits<XprType> XprTraits;
@@ -31,18 +32,24 @@ struct traits<TensorReductionOp<Op, Dims, XprType> >
typedef typename XprType::Nested Nested;
static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
static const int Layout = XprTraits::Layout;
+
+ template <class T> struct MakePointer {
+ // Intermediate typedef to workaround MSVC issue.
+ typedef MakePointer_<T> MakePointerT;
+ typedef typename MakePointerT::Type Type;
+ };
};
-template<typename Op, typename Dims, typename XprType>
-struct eval<TensorReductionOp<Op, Dims, XprType>, Eigen::Dense>
+template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
+struct eval<TensorReductionOp<Op, Dims, XprType, MakePointer_>, Eigen::Dense>
{
- typedef const TensorReductionOp<Op, Dims, XprType>& type;
+ typedef const TensorReductionOp<Op, Dims, XprType, MakePointer_>& type;
};
-template<typename Op, typename Dims, typename XprType>
-struct nested<TensorReductionOp<Op, Dims, XprType>, 1, typename eval<TensorReductionOp<Op, Dims, XprType> >::type>
+template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
+struct nested<TensorReductionOp<Op, Dims, XprType, MakePointer_>, 1, typename eval<TensorReductionOp<Op, Dims, XprType, MakePointer_> >::type>
{
- typedef TensorReductionOp<Op, Dims, XprType> type;
+ typedef TensorReductionOp<Op, Dims, XprType, MakePointer_> type;
};
@@ -339,8 +346,8 @@ __global__ void OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnTy
} // end namespace internal
-template <typename Op, typename Dims, typename XprType>
-class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType>, ReadOnlyAccessors> {
+template <typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
+class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType, MakePointer_>, ReadOnlyAccessors> {
public:
typedef typename Eigen::internal::traits<TensorReductionOp>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
@@ -371,18 +378,19 @@ class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType>
// Eval as rvalue
-template<typename Op, typename Dims, typename ArgType, typename Device>
-struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
+template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
+struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>
{
- typedef TensorReductionOp<Op, Dims, ArgType> XprType;
+ typedef TensorReductionOp<Op, Dims, ArgType, MakePointer_> XprType;
typedef typename XprType::Index Index;
+ typedef ArgType ChildType;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
static const int NumInputDims = internal::array_size<InputDimensions>::value;
static const int NumReducedDims = internal::array_size<Dims>::value;
static const int NumOutputDims = NumInputDims - NumReducedDims;
typedef typename internal::conditional<NumOutputDims==0, Sizes<>, DSizes<Index, NumOutputDims> >::type Dimensions;
typedef typename XprType::Scalar Scalar;
- typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device> Self;
+ typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> Self;
static const bool InputPacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess;
typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
@@ -401,7 +409,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
static const bool RunningFullReduction = (NumOutputDims==0);
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
- : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device)
+ : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device), m_xpr_dims(op.dims())
{
EIGEN_STATIC_ASSERT((NumInputDims >= NumReducedDims), YOU_MADE_A_PROGRAMMING_MISTAKE);
EIGEN_STATIC_ASSERT((!ReducingInnerMostDims | !PreservingInnerMostDims | (NumReducedDims == NumInputDims)),
@@ -471,25 +479,35 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
- EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool evalSubExprsIfNeeded(CoeffReturnType* data) {
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool evalSubExprsIfNeeded(typename MakePointer_<CoeffReturnType>::Type data) {
m_impl.evalSubExprsIfNeeded(NULL);
// Use the FullReducer if possible.
- if (RunningFullReduction &&
+ if ((RunningFullReduction && RunningOnSycl) ||(RunningFullReduction &&
internal::FullReducer<Self, Op, Device>::HasOptimizedImplementation &&
((RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) ||
- !RunningOnGPU)) {
+ !RunningOnGPU))) {
bool need_assign = false;
if (!data) {
m_result = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType)));
data = m_result;
need_assign = true;
}
-
Op reducer(m_reducer);
internal::FullReducer<Self, Op, Device>::run(*this, reducer, m_device, data);
return need_assign;
}
+ else if(RunningOnSycl){
+ const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
+ const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
+ if (!data) {
+ data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+ m_result = data;
+ }
+ Op reducer(m_reducer);
+ internal::InnerReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve);
+ return (m_result != NULL);
+ }
// Attempt to use an optimized reduction.
else if (RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) {
@@ -572,7 +590,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
- if ((RunningFullReduction || RunningOnGPU) && m_result) {
+ if ((RunningOnSycl || RunningFullReduction || RunningOnGPU) && m_result) {
return *(m_result + index);
}
Op reducer(m_reducer);
@@ -644,7 +662,20 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
}
}
- EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+ /// Required by SYCL in order to extract the output accessor.
+#ifndef EIGEN_USE_SYCL
+ EIGEN_DEVICE_FUNC typename MakePointer_<Scalar>::Type data() const { return NULL; }
+#else
+ EIGEN_DEVICE_FUNC typename MakePointer_<Scalar>::Type data() const { return m_result; }
+#endif
+ /// Required by SYCL in order to extract the accessor to the child expression.
+ const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+ /// Added for SYCL in order to construct the buffer from the SYCL device.
+ const Device& device() const { return m_device; }
+ /// Added for SYCL in order to re-construct the reduction evaluation on the device for the sub-kernel.
+ const Dims& xprDims() const { return m_xpr_dims; }
+
private:
template <int, typename, typename> friend struct internal::GenericDimReducer;
@@ -737,12 +768,18 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
// For full reductions
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
static const bool RunningOnGPU = internal::is_same<Device, Eigen::GpuDevice>::value;
+ static const bool RunningOnSycl = false;
+#elif defined(EIGEN_USE_SYCL)
+ static const bool RunningOnSycl = internal::is_same<typename internal::remove_all<Device>::type, Eigen::SyclDevice>::value;
+ static const bool RunningOnGPU = false;
#else
static const bool RunningOnGPU = false;
+ static const bool RunningOnSycl = false;
#endif
- CoeffReturnType* m_result;
+ typename MakePointer_<CoeffReturnType>::Type m_result;
const Device& m_device;
+ const Dims& m_xpr_dims;
};
} // end namespace Eigen
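
At the user level the new dispatch is exercised by an ordinary reduction evaluated on a SyclDevice, as in the cxx11_tensor_reduction_sycl.cpp test added by this change. A minimal sketch, assuming the selector-based SyclDevice constructor used by the SYCL tests of this era (the queue setup is an assumption):

    #define EIGEN_USE_SYCL
    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      cl::sycl::gpu_selector s;        // assumed constructor argument
      Eigen::SyclDevice sycl_device(s);

      Eigen::array<Eigen::DenseIndex, 2> dims{{3, 4}};
      Eigen::Tensor<float, 2> in(dims);
      Eigen::Tensor<float, 0> full;    // rank-0 result of a full reduction
      in.setRandom();

      Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_in(in.data(), dims);
      Eigen::TensorMap<Eigen::Tensor<float, 0> > gpu_full(full.data());

      // RunningOnSycl is true here, so evalSubExprsIfNeeded routes the
      // expression to the SYCL FullReducer defined further below.
      gpu_full.device(sycl_device) = gpu_in.sum();
      return 0;
    }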
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
new file mode 100644
index 000000000..1c89132db
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
@@ -0,0 +1,242 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TensorReductionSycl.h
+ *
+ * \brief:
+ *  This is the specialisation of the reduction operation for the SYCL
+ *  backend.
+ *
+*****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP
+
+namespace Eigen {
+namespace internal {
+
+template<typename CoeffReturnType, typename KernelName> struct syclGenericBufferReducer{
+template<typename BufferTOut, typename BufferTIn>
+static void run(BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
+ do {
+ auto f = [length, local, &bufOut, &bufI](cl::sycl::handler& h) mutable {
+ cl::sycl::nd_range<1> r{cl::sycl::range<1>{std::max(length, local)},
+ cl::sycl::range<1>{std::min(length, local)}};
+ /* Two accessors are used: one to the buffer that is being reduced,
+ * and a second to local memory, used to store intermediate data. */
+ auto aI =
+ bufI.template get_access<cl::sycl::access::mode::read_write>(h);
+ auto aOut =
+ bufOut.template get_access<cl::sycl::access::mode::discard_write>(h);
+ cl::sycl::accessor<CoeffReturnType, 1, cl::sycl::access::mode::read_write,
+ cl::sycl::access::target::local>
+ scratch(cl::sycl::range<1>(local), h);
+
+ /* The parallel_for invocation chosen is the variant with an nd_item
+ * parameter, since the code requires barriers for correctness. */
+ h.parallel_for<KernelName>(
+ r, [aOut, aI, scratch, local, length](cl::sycl::nd_item<1> id) {
+ size_t globalid = id.get_global(0);
+ size_t localid = id.get_local(0);
+ /* All threads collectively read from global memory into local.
+ * The barrier ensures all threads' IO is resolved before
+ * execution continues (strictly speaking, all threads within
+ * a single work-group - there is no co-ordination between
+ * work-groups, only work-items). */
+ if (globalid < length) {
+ scratch[localid] = aI[globalid];
+ }
+ id.barrier(cl::sycl::access::fence_space::local_space);
+
+ /* Apply the reduction operation between the current local
+ * id and the one on the other half of the vector. */
+ if (globalid < length) {
+ int min = (length < local) ? length : local;
+ for (size_t offset = min / 2; offset > 0; offset /= 2) {
+ if (localid < offset) {
+ scratch[localid] += scratch[localid + offset];
+ }
+ id.barrier(cl::sycl::access::fence_space::local_space);
+ }
+ /* The final result will be stored in local id 0. */
+ if (localid == 0) {
+ aI[id.get_group(0)] = scratch[localid];
+ if((length<=local) && globalid ==0){
+ aOut[globalid]=scratch[localid];
+ }
+ }
+ }
+ });
+ };
+ dev.m_queue.submit(f);
+ dev.m_queue.throw_asynchronous();
+
+ /* At this point, you could queue::wait_and_throw() to ensure that
+ * errors are caught quickly. However, this would likely impact
+ * performance negatively. */
+ length = length / local;
+
+ } while (length > 1);
+
+}
+
+};
+
+/// For now let's start with a full reducer.
+/// Self is not used here because, during expression construction, we treat the reduction as a leaf node.
+/// We take the child of the reduction, build an expression from it, and apply the full reducer to that expression: FullReducer applies the
+/// reduction operation to the child of the reduction. Once that is done, the reduction itself is an empty shell that can be thrown away and treated as
+/// a leaf node.
+template <typename Self, typename Op, bool Vectorizable>
+struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
+
+ typedef typename Self::CoeffReturnType CoeffReturnType;
+ static const bool HasOptimizedImplementation = false;
+
+ static void run(const Self& self, Op& reducer, const Eigen::SyclDevice& dev, CoeffReturnType* output) {
+ typedef const typename Self::ChildType HostExpr; /// this is the child of the reduction
+ typedef typename TensorSycl::internal::createPlaceHolderExpression<HostExpr>::Type PlaceHolderExpr;
+ auto functors = TensorSycl::internal::extractFunctors(self.impl());
+ int red_factor = 256; /// initial reduction factor. If the input size is less than red_factor we only create one thread.
+ size_t inputSize = self.impl().dimensions().TotalSize();
+ size_t rng = inputSize / red_factor; // the initial number of threads is the input size divided by the reduction factor
+ size_t remaining = inputSize % red_factor;
+ if (rng == 0) {
+ red_factor = 1;
+ }
+ size_t tileSize = dev.m_queue.get_device().template get_info<cl::sycl::info::device::max_work_group_size>()/2;
+ size_t GRange = std::max((size_t)1, rng);
+
+ // round the global range up to the next power of 2 for the reduction
+ GRange--;
+ GRange |= GRange >> 1;
+ GRange |= GRange >> 2;
+ GRange |= GRange >> 4;
+ GRange |= GRange >> 8;
+ GRange |= GRange >> 16;
+#if __x86_64__ || __ppc64__ || _WIN64
+ GRange |= GRange >> 32;
+#endif
+ GRange++;
+ size_t outTileSize = tileSize;
+ /// if the rounded-up global range is smaller than the tile size, clamp the tile size to GRange; in that case a single
+ /// work-group is enough for the recursion kernel to reduce everything down to one element.
+ if (GRange < outTileSize) outTileSize = GRange;
+ // get the final output buffer. MapAllocator is true here because the buffer maps the user-provided output pointer, so no separate assign is needed.
+ auto out_buffer = dev.template get_sycl_buffer<true, typename Eigen::internal::remove_all<CoeffReturnType>::type>(self.dimensions().TotalSize(), output);
+ /// create a temporary global buffer for the reduction.
+ /// It collects the per-work-group partial results, since we don't have a global barrier on the GPU. Once they are saved we can
+ /// recursively apply the reduction to this buffer in order to reduce the whole input.
+ auto temp_global_buffer = cl::sycl::buffer<CoeffReturnType, 1>(cl::sycl::range<1>(GRange));
+ typedef typename Eigen::internal::remove_all<decltype(self.xprDims())>::type Dims;
+ Dims dims = self.xprDims();
+ Op functor = reducer;
+ dev.m_queue.submit([&](cl::sycl::handler &cgh) {
+ // create a tuple of accessors from Evaluator
+ auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
+ auto tmp_global_accessor = temp_global_buffer. template get_access<cl::sycl::access::mode::read_write, cl::sycl::access::target::global_buffer>(cgh);
+
+ cgh.parallel_for<PlaceHolderExpr>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(outTileSize)), [=](cl::sycl::nd_item<1> itemID) {
+ typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
+ auto device_expr = TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
+ /// The reduction cannot be captured automatically through our device conversion recursion, because a reduction has two behaviours:
+ /// the first is when it is used as the root to launch the sub-kernel; the second is when it is treated as a leaf node that passes the
+ /// calculated result to its parent kernel. The latter is detected automatically by our device expression generator; the former is created here.
+ const auto device_self_expr= TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, functor);
+ /// This is the evaluator for device_self_expr. It is equivalent to the self that was passed to the run function, except that
+ /// the device evaluator is detectable and recognisable on the device.
+ auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
+ /// const_cast added as a naive solution to work around the qualifier-drop error
+ auto globalid=itemID.get_global_linear_id();
+
+ if(globalid<rng)
+ tmp_global_accessor.get_pointer()[globalid]=InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, red_factor*globalid, red_factor, const_cast<Op&>(functor));
+ else
+ tmp_global_accessor.get_pointer()[globalid]=static_cast<CoeffReturnType>(0);
+
+ if (remaining != 0 && globalid == 0)
+ // this adds the rest of the input buffer when the input size is not divisible by red_factor.
+ tmp_global_accessor.get_pointer()[globalid] += InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, red_factor*(rng), remaining, const_cast<Op&>(functor));
+ });
+ });
+ dev.m_queue.throw_asynchronous();
+
+ /// This recursively reduces the temporary buffer down to a single element.
+ syclGenericBufferReducer<CoeffReturnType,HostExpr>::run(out_buffer, temp_global_buffer,dev, GRange, outTileSize);
+ }
+
+};
+
+template <typename Self, typename Op>
+struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
+
+ typedef typename Self::CoeffReturnType CoeffReturnType;
+ static const bool HasOptimizedImplementation = false;
+
+ static bool run(const Self& self, Op& reducer, const Eigen::SyclDevice& dev, CoeffReturnType* output, typename Self::Index , typename Self::Index num_coeffs_to_preserve) {
+ typedef const typename Self::ChildType HostExpr; /// this is the child of the reduction
+ typedef typename TensorSycl::internal::createPlaceHolderExpression<HostExpr>::Type PlaceHolderExpr;
+ auto functors = TensorSycl::internal::extractFunctors(self.impl());
+
+ size_t tileSize =dev.m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
+
+ size_t GRange=num_coeffs_to_preserve;
+ if (tileSize>GRange) tileSize=GRange;
+ else if(GRange>tileSize){
+ size_t xMode = GRange % tileSize;
+ if (xMode != 0) GRange += (tileSize - xMode);
+ }
+ typedef typename Eigen::internal::remove_all<decltype(self.xprDims())>::type Dims;
+ Dims dims= self.xprDims();
+ Op functor = reducer;
+
+ dev.m_queue.submit([&](cl::sycl::handler &cgh) {
+ // create a tuple of accessors from Evaluator
+ auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
+ auto output_accessor = dev.template get_sycl_accessor<cl::sycl::access::mode::discard_write, true>(num_coeffs_to_preserve,cgh, output);
+
+ cgh.parallel_for<Self>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
+ typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
+ auto device_expr = TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
+ /// The reduction cannot be captured automatically through our device conversion recursion, because a reduction has two behaviours:
+ /// the first is when it is used as the root to launch the sub-kernel; the second is when it is treated as a leaf node that passes the
+ /// calculated result to its parent kernel. The latter is detected automatically by our device expression generator; the former is created here.
+ const auto device_self_expr= TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, functor);
+ /// This is the evaluator for device_self_expr. It is equivalent to the self that was passed to the run function, except that
+ /// the device evaluator is detectable and recognisable on the device.
+ typedef Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice> DeviceSelf;
+ auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
+ /// const_cast added as a naive solution to work around the qualifier-drop error
+ auto globalid = itemID.get_global_linear_id();
+ if (globalid < static_cast<size_t>(num_coeffs_to_preserve)) {
+ typename DeviceSelf::CoeffReturnType accum = functor.initialize();
+ GenericDimReducer<DeviceSelf::NumReducedDims-1, DeviceSelf, Op>::reduce(device_self_evaluator, device_self_evaluator.firstInput(globalid), const_cast<Op&>(functor), &accum);
+ functor.finalize(accum);
+ output_accessor.get_pointer()[globalid] = accum;
+ }
+ });
+ });
+ dev.m_queue.throw_asynchronous();
+ return false;
+ }
+};
+
+} // end namespace internal
+} // namespace Eigen
+
+#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP
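
Two of the tricks in this file are easy to check in isolation: the bit-smearing round-up of GRange to a power of two, and the halving loop that drives the tree reduction. A self-contained host-side analogue (plain C++, no SYCL; all names are illustrative):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Round n up to the next power of two, as done for GRange above.
    static size_t round_up_pow2(size_t n) {
      n--;
      n |= n >> 1; n |= n >> 2; n |= n >> 4;
      n |= n >> 8; n |= n >> 16;
    #if __x86_64__ || __ppc64__ || _WIN64
      n |= n >> 32;
    #endif
      return n + 1;
    }

    int main() {
      assert(round_up_pow2(5) == 8);
      assert(round_up_pow2(64) == 64);

      // Host-side analogue of the halving loop in syclGenericBufferReducer:
      // each pass collapses `local`-sized chunks into one partial result,
      // mimicking the aI[group] writes on the device.
      std::vector<float> buf(32, 1.0f);
      size_t length = buf.size(), local = 4;
      while (length > 1) {
        size_t groups = (length + local - 1) / local;
        for (size_t g = 0; g < groups; ++g) {
          float acc = 0.0f;
          for (size_t i = g * local; i < std::min(length, (g + 1) * local); ++i)
            acc += buf[i];
          buf[g] = acc;
        }
        length = groups;
      }
      assert(buf[0] == 32.0f);
      return 0;
    }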
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h
index da15f7942..bb8800d45 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h
@@ -22,6 +22,13 @@ struct MakeGlobalPointer {
typedef typename cl::sycl::global_ptr<T>::pointer_t Type;
};
+// local pointer used to set a different (local) address-space attribute for a class
+template <class T>
+struct MakeLocalPointer {
+ typedef typename cl::sycl::local_ptr<T>::pointer_t Type;
+};
+
+
namespace Eigen {
namespace TensorSycl {
namespace internal {
@@ -43,9 +50,7 @@ template<typename T> struct GetType<false, T>{
// tuple construction
#include "TensorSyclTuple.h"
-// This file contains the PlaceHolder that replaces the actual data
-#include "TensorSyclPlaceHolder.h"
-
+// counts the number of leaf nodes at compile time
#include "TensorSyclLeafCount.h"
// The index PlaceHolder takes the actual expression and replaces the actual
@@ -57,9 +62,6 @@ template<typename T> struct GetType<false, T>{
// creation of an accessor tuple from a tuple of SYCL buffers
#include "TensorSyclExtractAccessor.h"
-// actual data extraction using accessors
-//#include "GetDeviceData.h"
-
// this is used to change the address space type in tensor map for GPU
#include "TensorSyclConvertToDeviceExpression.h"
@@ -70,6 +72,9 @@ template<typename T> struct GetType<false, T>{
// this is used to construct the expression on the device
#include "TensorSyclExprConstructor.h"
+/// this is used for extracting the tensor reduction
+#include "TensorReductionSycl.h"
+
// kernel execution using fusion
#include "TensorSyclRun.h"
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
index a94c30426..8729c86ee 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
@@ -102,6 +102,18 @@ KERNELBROKERCONVERT(, false, TensorForcedEvalOp)
KERNELBROKERCONVERT(const, true, TensorEvalToOp)
KERNELBROKERCONVERT(, false, TensorEvalToOp)
#undef KERNELBROKERCONVERT
+
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorReductionOp
+#define KERNELBROKERCONVERTREDUCTION(CVQual)\
+template <typename OP, typename Dim, typename subExpr, template <class> class MakePointer_>\
+struct ConvertToDeviceExpression<CVQual TensorReductionOp<OP, Dim, subExpr, MakePointer_> > {\
+ typedef CVQual TensorReductionOp<OP, Dim, typename ConvertToDeviceExpression<subExpr>::Type, MakeGlobalPointer> Type;\
+};
+
+KERNELBROKERCONVERTREDUCTION(const)
+KERNELBROKERCONVERTREDUCTION()
+#undef KERNELBROKERCONVERTREDUCTION
+
} // namespace internal
} // namespace TensorSycl
} // namespace Eigen
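
For reference, KERNELBROKERCONVERTREDUCTION(const) expands to the following specialisation (the non-qualified instantiation is identical without const):

    template <typename OP, typename Dim, typename subExpr, template <class> class MakePointer_>
    struct ConvertToDeviceExpression<const TensorReductionOp<OP, Dim, subExpr, MakePointer_> > {
      typedef const TensorReductionOp<OP, Dim, typename ConvertToDeviceExpression<subExpr>::Type, MakeGlobalPointer> Type;
    };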
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
index 833d5e271..7ed3a3a56 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
@@ -33,8 +33,7 @@ struct EvalToLHSConstructor {
EvalToLHSConstructor(const utility::tuple::Tuple<Params...> &t): expr((&(*(utility::tuple::get<N>(t).get_pointer())))) {}
};
-/// \struct ExprConstructor is used to reconstruct the expression on the device
-/// and
+/// \struct ExprConstructor is used to reconstruct the expression on the device and
/// recreate the expression with MakeGlobalPointer containing the device address
/// space for the TensorMap pointers used in eval function.
/// It receives the original expression type, the functor of the node, the tuple
@@ -49,7 +48,7 @@ struct ExprConstructor;
template <typename Scalar_, int Options_, int Options2_, int Options3_, int NumIndices_, typename IndexType_,\
template <class> class MakePointer_, size_t N, typename... Params>\
struct ExprConstructor< CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakeGlobalPointer>,\
-CVQual Eigen::internal::PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options3_, MakePointer_>, N>, Params...>{\
+CVQual PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options3_, MakePointer_>, N>, Params...>{\
typedef CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakeGlobalPointer> Type;\
Type expr;\
template <typename FuncDetector>\
@@ -187,7 +186,7 @@ EVALTO()
#define FORCEDEVAL(CVQual)\
template <typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
struct ExprConstructor<CVQual TensorForcedEvalOp<OrigExpr, MakeGlobalPointer>,\
-CVQual Eigen::internal::PlaceHolder<CVQual TensorForcedEvalOp<DevExpr>, N>, Params...> {\
+CVQual PlaceHolder<CVQual TensorForcedEvalOp<DevExpr>, N>, Params...> {\
typedef CVQual TensorMap<Tensor<typename TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::Scalar,\
TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::NumDimensions, 0, typename TensorForcedEvalOp<DevExpr>::Index>, 0, MakeGlobalPointer> Type;\
Type expr;\
@@ -200,14 +199,41 @@ FORCEDEVAL(const)
FORCEDEVAL()
#undef FORCEDEVAL
+template <bool Conds, size_t X, size_t Y> struct ValueCondition {
+  static const size_t Res = X;
+};
+template <size_t X, size_t Y> struct ValueCondition<false, X, Y> {
+  static const size_t Res = Y;
+};
+
+/// specialisation of the \ref ExprConstructor struct when the node type is TensorReductionOp
+#define SYCLREDUCTIONEXPR(CVQual)\
+template <typename OP, typename Dim, typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
+struct ExprConstructor<CVQual TensorReductionOp<OP, Dim, OrigExpr, MakeGlobalPointer>,\
+CVQual PlaceHolder<CVQual TensorReductionOp<OP, Dim, DevExpr>, N>, Params...> {\
+ static const size_t NumIndices= ValueCondition< TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions==0, 1, TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions >::Res;\
+ typedef CVQual TensorMap<Tensor<typename TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::Scalar,\
+ NumIndices, 0, typename TensorReductionOp<OP, Dim, DevExpr>::Index>, 0, MakeGlobalPointer> Type;\
+ Type expr;\
+ template <typename FuncDetector>\
+ ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
+ : expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}\
+};
+
+SYCLREDUCTIONEXPR(const)
+SYCLREDUCTIONEXPR()
+#undef SYCLREDUCTIONEXPR
+
/// template deduction for \ref ExprConstructor struct
template <typename OrigExpr, typename IndexExpr, typename FuncD, typename... Params>
auto createDeviceExpression(FuncD &funcD, const utility::tuple::Tuple<Params...> &t)
-> decltype(ExprConstructor<OrigExpr, IndexExpr, Params...>(funcD, t)) {
return ExprConstructor<OrigExpr, IndexExpr, Params...>(funcD, t);
}
-}
-}
-} // namespace Eigen
+
+} /// namespace internal
+} /// namespace TensorSycl
+} /// namespace Eigen
+
#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXPR_CONSTRUCTOR_HPP
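
ValueCondition is a small compile-time select used above to clamp the TensorMap rank to at least 1 when a full reduction yields a rank-0 result. A self-contained check (the static_asserts are illustrative):

    #include <cstddef>

    template <bool Conds, size_t X, size_t Y> struct ValueCondition {
      static const size_t Res = X;
    };
    template <size_t X, size_t Y> struct ValueCondition<false, X, Y> {
      static const size_t Res = Y;
    };

    // A rank-0 reduction result is mapped onto a rank-1 TensorMap:
    static_assert(ValueCondition<true, 1, 3>::Res == 1, "NumDimensions == 0 case");
    static_assert(ValueCondition<false, 1, 3>::Res == 3, "NumDimensions == 3 case");

    int main() { return 0; }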
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
index ceec528ea..3af5f8cfc 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
@@ -56,10 +56,10 @@ struct AccessorConstructor{
-> decltype(utility::tuple::append(ExtractAccessor<Arg1>::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor<Arg2>::getTuple(cgh, eval2), ExtractAccessor<Arg3>::getTuple(cgh, eval3)))) {
return utility::tuple::append(ExtractAccessor<Arg1>::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor<Arg2>::getTuple(cgh, eval2), ExtractAccessor<Arg3>::getTuple(cgh, eval3)));
}
- template< cl::sycl::access::mode AcM, typename Arg> static inline auto getAccessor(cl::sycl::handler& cgh, Arg eval)
- -> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor<AcM, true,
+ template< cl::sycl::access::mode AcM, bool MapAllocator, typename Arg> static inline auto getAccessor(cl::sycl::handler& cgh, Arg eval)
+ -> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor<AcM, MapAllocator,
typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()))){
- return utility::tuple::make_tuple(eval.device().template get_sycl_accessor<AcM, true, typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()));
+ return utility::tuple::make_tuple(eval.device().template get_sycl_accessor<AcM, MapAllocator, typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()));
}
};
@@ -73,14 +73,12 @@ struct ExtractAccessor<TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev> >
}
};
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// TensorCwiseNullaryOp, TensorCwiseUnaryOp and TensorBroadcastingOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorCwiseNullaryOp, TensorCwiseUnaryOp and TensorBroadcastingOp
template <template<class, class> class UnaryCategory, typename OP, typename RHSExpr, typename Dev>
struct ExtractAccessor<TensorEvaluator<UnaryCategory<OP, RHSExpr>, Dev> >
: ExtractAccessor<TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev> > {};
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// const TensorCwiseBinaryOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorCwiseBinaryOp
template <template<class, class, class> class BinaryCategory, typename OP, typename LHSExpr, typename RHSExpr, typename Dev>
struct ExtractAccessor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> > {
static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> eval)
@@ -88,9 +86,7 @@ struct ExtractAccessor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr
return AccessorConstructor::getTuple(cgh, eval.left_impl(), eval.right_impl());
}
};
-
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// TensorCwiseBinaryOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorCwiseBinaryOp
template <template<class, class, class> class BinaryCategory, typename OP, typename LHSExpr, typename RHSExpr, typename Dev>
struct ExtractAccessor<TensorEvaluator<BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> >
: ExtractAccessor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> >{};
@@ -105,8 +101,7 @@ struct ExtractAccessor<TensorEvaluator<const TernaryCategory<OP, Arg1Expr, Arg2E
}
};
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// TensorCwiseTernaryOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorCwiseTernaryOp
template <template<class, class, class, class> class TernaryCategory, typename OP, typename Arg1Expr, typename Arg2Expr, typename Arg3Expr, typename Dev>
struct ExtractAccessor<TensorEvaluator<TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> >
: ExtractAccessor<TensorEvaluator<const TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> >{};
@@ -127,8 +122,7 @@ template <typename IfExpr, typename ThenExpr, typename ElseExpr, typename Dev>
struct ExtractAccessor<TensorEvaluator<TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev> >
: ExtractAccessor<TensorEvaluator<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev> >{};
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// const TensorAssignOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorAssignOp
template <typename LHSExpr, typename RHSExpr, typename Dev>
struct ExtractAccessor<TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> > {
static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> eval)
@@ -137,65 +131,74 @@ struct ExtractAccessor<TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, D
}
};
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// TensorAssignOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorAssignOp
template <typename LHSExpr, typename RHSExpr, typename Dev>
struct ExtractAccessor<TensorEvaluator<TensorAssignOp<LHSExpr, RHSExpr>, Dev> >
: ExtractAccessor<TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> >{};
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// const TensorMap
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorMap
#define TENSORMAPEXPR(CVQual, ACCType)\
template <typename PlainObjectType, int Options_, typename Dev>\
struct ExtractAccessor<TensorEvaluator<CVQual TensorMap<PlainObjectType, Options_>, Dev> > {\
static inline auto getTuple(cl::sycl::handler& cgh,const TensorEvaluator<CVQual TensorMap<PlainObjectType, Options_>, Dev> eval)\
- -> decltype(AccessorConstructor::template getAccessor<ACCType>(cgh, eval)){\
- return AccessorConstructor::template getAccessor<ACCType>(cgh, eval);\
+ -> decltype(AccessorConstructor::template getAccessor<ACCType, true>(cgh, eval)){\
+ return AccessorConstructor::template getAccessor<ACCType, true>(cgh, eval);\
}\
};
TENSORMAPEXPR(const, cl::sycl::access::mode::read)
TENSORMAPEXPR(, cl::sycl::access::mode::read_write)
#undef TENSORMAPEXPR
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// const TensorForcedEvalOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorForcedEvalOp
template <typename Expr, typename Dev>
struct ExtractAccessor<TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> > {
static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> eval)
- -> decltype(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval)){
- return AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval);
+ -> decltype(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read, false>(cgh, eval)){
+ return AccessorConstructor::template getAccessor<cl::sycl::access::mode::read, false>(cgh, eval);
}
};
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// TensorForcedEvalOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorForcedEvalOp
template <typename Expr, typename Dev>
struct ExtractAccessor<TensorEvaluator<TensorForcedEvalOp<Expr>, Dev> >
: ExtractAccessor<TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> >{};
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// const TensorEvalToOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorEvalToOp
template <typename Expr, typename Dev>
struct ExtractAccessor<TensorEvaluator<const TensorEvalToOp<Expr>, Dev> > {
static inline auto getTuple(cl::sycl::handler& cgh,const TensorEvaluator<const TensorEvalToOp<Expr>, Dev> eval)
- -> decltype(utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()))){
- return utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()));
+ -> decltype(utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write, false>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()))){
+ return utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write, false>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()));
}
};
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// TensorEvalToOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorEvalToOp
template <typename Expr, typename Dev>
struct ExtractAccessor<TensorEvaluator<TensorEvalToOp<Expr>, Dev> >
: ExtractAccessor<TensorEvaluator<const TensorEvalToOp<Expr>, Dev> >{};
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorReductionOp
+template <typename OP, typename Dim, typename Expr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> > {
+ static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> eval)
+ -> decltype(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read, false>(cgh, eval)){
+ return AccessorConstructor::template getAccessor<cl::sycl::access::mode::read, false>(cgh, eval);
+ }
+};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorReductionOp
+template <typename OP, typename Dim, typename Expr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<TensorReductionOp<OP, Dim, Expr>, Dev> >
+: ExtractAccessor<TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> >{};
+
/// template deduction for \ref ExtractAccessor
template <typename Evaluator>
auto createTupleOfAccessors(cl::sycl::handler& cgh, const Evaluator& expr)
-> decltype(ExtractAccessor<Evaluator>::getTuple(cgh, expr)) {
return ExtractAccessor<Evaluator>::getTuple(cgh, expr);
}
-}
-}
-}
+
+} /// namespace internal
+} /// namespace TensorSycl
+} /// namespace Eigen
#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXTRACT_ACCESSOR_HPP
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
index 801b4f5d7..427125343 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
@@ -141,7 +141,30 @@ template <typename RHSExpr, typename Dev>
struct FunctorExtractor<TensorEvaluator<TensorEvalToOp<RHSExpr>, Dev> >
: FunctorExtractor<TensorEvaluator<const TensorEvalToOp<RHSExpr>, Dev> > {};
+template<typename Dim, size_t NumOutputDim> struct DimConstr {
+  template<typename InDim>
+  static inline Dim getDim(InDim dims) { return dims; }
+};
+
+template<typename Dim> struct DimConstr<Dim, 0> {
+  template<typename InDim>
+  static inline Dim getDim(InDim dims) { return Dim(dims.TotalSize()); }
+};
+
+template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
+struct FunctorExtractor<TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>{
+ typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> Evaluator;
+ typedef typename Eigen::internal::conditional<Evaluator::NumOutputDims==0, DSizes<typename Evaluator::Index, 1>, typename Evaluator::Dimensions >::type Dimensions;
+ const Dimensions m_dimensions;
+ const Dimensions& dimensions() const { return m_dimensions; }
+ FunctorExtractor(const TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>& expr)
+ : m_dimensions(DimConstr<Dimensions, Evaluator::NumOutputDims>::getDim(expr.dimensions())) {}
+};
+
+template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
+struct FunctorExtractor<TensorEvaluator<TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>
+: FunctorExtractor<TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>{};
/// template deduction function for FunctorExtractor
template <typename Evaluator>
auto inline extractFunctors(const Evaluator& evaluator)-> FunctorExtractor<Evaluator> {
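
DimConstr above papers over the rank-0 case: when the reduction preserves no dimensions, the evaluator's Sizes<> is converted into a one-element DSizes via TotalSize(). A toy analogue with stand-in types (all names here are illustrative):

    #include <cstddef>
    #include <cstdio>

    struct FixedDims {               // stand-in for the rank-0 Sizes<>
      size_t TotalSize() const { return 1; }
    };
    struct DynDims {                 // stand-in for DSizes<Index, 1>
      size_t size;
      explicit DynDims(size_t n) : size(n) {}
    };

    template <typename Dim, size_t NumOutputDim> struct DimConstr {
      template <typename InDim> static Dim getDim(InDim dims) { return dims; }
    };
    template <typename Dim> struct DimConstr<Dim, 0> {
      template <typename InDim> static Dim getDim(InDim dims) { return Dim(dims.TotalSize()); }
    };

    int main() {
      DynDims d = DimConstr<DynDims, 0>::getDim(FixedDims());
      std::printf("%zu\n", d.size); // prints 1: the scalar result occupies one coefficient
      return 0;
    }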
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
index 8d520d2da..25d1fac9b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
@@ -43,8 +43,7 @@ struct CategoryCount<Arg,Args...>{
static const size_t Count = LeafCount<Arg>::Count + CategoryCount<Args...>::Count;
};
-/// specialisation of the \ref LeafCount struct when the node type is const
-/// TensorMap
+/// specialisation of the \ref LeafCount struct when the node type is const TensorMap
template <typename PlainObjectType, int Options_, template <class> class MakePointer_>
struct LeafCount<const TensorMap<PlainObjectType, Options_, MakePointer_> > {
static const size_t Count =1;
@@ -61,18 +60,15 @@ struct LeafCount<const CategoryExpr<OP, RHSExpr...> >: CategoryCount<RHSExpr...>
template <template <class, class...> class CategoryExpr, typename OP, typename... RHSExpr>
struct LeafCount<CategoryExpr<OP, RHSExpr...> > :LeafCount<const CategoryExpr<OP, RHSExpr...> >{};
-/// specialisation of the \ref LeafCount struct when the node type is
-/// const TensorSelectOp is an exception
+/// specialisation of the \ref LeafCount struct when the node type is const TensorSelectOp; this is an exception
template <typename IfExpr, typename ThenExpr, typename ElseExpr>
struct LeafCount<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr> > : CategoryCount<IfExpr, ThenExpr, ElseExpr> {};
-/// specialisation of the \ref LeafCount struct when the node type is
-/// TensorSelectOp
+/// specialisation of the \ref LeafCount struct when the node type is TensorSelectOp
template <typename IfExpr, typename ThenExpr, typename ElseExpr>
struct LeafCount<TensorSelectOp<IfExpr, ThenExpr, ElseExpr> >: LeafCount<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr> > {};
-/// specialisation of the \ref LeafCount struct when the node type is const
-/// TensorAssignOp
+/// specialisation of the \ref LeafCount struct when the node type is const TensorAssignOp
template <typename LHSExpr, typename RHSExpr>
struct LeafCount<const TensorAssignOp<LHSExpr, RHSExpr> >: CategoryCount<LHSExpr,RHSExpr> {};
@@ -81,31 +77,38 @@ struct LeafCount<const TensorAssignOp<LHSExpr, RHSExpr> >: CategoryCount<LHSExpr
template <typename LHSExpr, typename RHSExpr>
struct LeafCount<TensorAssignOp<LHSExpr, RHSExpr> > :LeafCount<const TensorAssignOp<LHSExpr, RHSExpr> >{};
-/// specialisation of the \ref LeafCount struct when the node type is const
-/// TensorForcedEvalOp
+/// specialisation of the \ref LeafCount struct when the node type is const TensorForcedEvalOp
template <typename Expr>
struct LeafCount<const TensorForcedEvalOp<Expr> > {
static const size_t Count =1;
};
-/// specialisation of the \ref LeafCount struct when the node type is
-/// TensorForcedEvalOp
+/// specialisation of the \ref LeafCount struct when the node type is TensorForcedEvalOp
template <typename Expr>
struct LeafCount<TensorForcedEvalOp<Expr> >: LeafCount<const TensorForcedEvalOp<Expr> > {};
-/// specialisation of the \ref LeafCount struct when the node type is const
-/// TensorEvalToOp
+/// specialisation of the \ref LeafCount struct when the node type is const TensorEvalToOp
template <typename Expr>
struct LeafCount<const TensorEvalToOp<Expr> > {
static const size_t Count = 1 + CategoryCount<Expr>::Count;
};
-/// specialisation of the \ref LeafCount struct when the node type is
-/// TensorEvalToOp
+/// specialisation of the \ref LeafCount struct when the node type is const TensorReductionOp
+template <typename OP, typename Dim, typename Expr>
+struct LeafCount<const TensorReductionOp<OP, Dim, Expr> > {
+ static const size_t Count =1;
+};
+
+/// specialisation of the \ref LeafCount struct when the node type is TensorReductionOp
+template <typename OP, typename Dim, typename Expr>
+struct LeafCount<TensorReductionOp<OP, Dim, Expr> >: LeafCount<const TensorReductionOp<OP, Dim, Expr> >{};
+
+/// specialisation of the \ref LeafCount struct when the node type is TensorEvalToOp
template <typename Expr>
struct LeafCount<TensorEvalToOp<Expr> >: LeafCount<const TensorEvalToOp<Expr> >{};
-}
-}
-} // namespace Eigen
+
+} /// namespace internal
+} /// namespace TensorSycl
+} /// namespace Eigen
#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_LEAF_COUNT_HPP
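
The new LeafCount specialisations make a whole reduction sub-tree count as a single leaf, matching the "empty shell" treatment in TensorReductionSycl.h. A toy analogue of the counting scheme (all types here are illustrative):

    #include <cstddef>

    struct Leaf {};                                   // stands in for a TensorMap
    template <typename T> struct LeafCount;
    template <> struct LeafCount<Leaf> { static const size_t Count = 1; };

    template <typename L, typename R> struct Add {};  // a binary expression node
    template <typename L, typename R> struct LeafCount<Add<L, R> > {
      static const size_t Count = LeafCount<L>::Count + LeafCount<R>::Count;
    };

    template <typename Sub> struct Reduce {};         // stands in for TensorReductionOp
    template <typename Sub> struct LeafCount<Reduce<Sub> > {
      static const size_t Count = 1;                  // one leaf, whatever the sub-tree holds
    };

    static_assert(LeafCount<Add<Leaf, Leaf> >::Count == 2, "two tensor leaves");
    static_assert(LeafCount<Reduce<Add<Leaf, Leaf> > >::Count == 1, "reduction hides its sub-tree");

    int main() { return 0; }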
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolder.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolder.h
deleted file mode 100644
index 43a63c73d..000000000
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolder.h
+++ /dev/null
@@ -1,99 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Mehdi Goli Codeplay Software Ltd.
-// Ralph Potter Codeplay Software Ltd.
-// Luke Iwanski Codeplay Software Ltd.
-// Contact: <eigen@codeplay.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-/*****************************************************************
- * TensorSyclPlaceHolder.h
- *
- * \brief:
- * The PlaceHolder expression are nothing but a container preserving
- * the order of actual data in the tuple of sycl buffer.
- *
-*****************************************************************/
-
-#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_PLACEHOLDER_HPP
-#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_PLACEHOLDER_HPP
-
-namespace Eigen {
-namespace internal {
-/// \struct PlaceHolder
-/// \brief PlaceHolder is used to replace the \ref TensorMap in the expression
-/// tree.
-/// PlaceHolder contains the order of the leaf node in the expression tree.
-template <typename Scalar, size_t N>
-struct PlaceHolder {
- static constexpr size_t I = N;
- typedef Scalar Type;
-};
-
-/// \brief specialisation of the PlaceHolder node for const TensorMap
-#define TENSORMAPPLACEHOLDER(CVQual)\
-template <typename PlainObjectType, int Options_, template <class> class MakePointer_, size_t N>\
-struct PlaceHolder<CVQual TensorMap<PlainObjectType, Options_, MakePointer_>, N> {\
- static const size_t I = N;\
- typedef CVQual TensorMap<PlainObjectType, Options_, MakePointer_> Type;\
- typedef typename Type::Self Self;\
- typedef typename Type::Base Base;\
- typedef typename Type::Nested Nested;\
- typedef typename Type::StorageKind StorageKind;\
- typedef typename Type::Index Index;\
- typedef typename Type::Scalar Scalar;\
- typedef typename Type::RealScalar RealScalar;\
- typedef typename Type::CoeffReturnType CoeffReturnType;\
-};
-
-TENSORMAPPLACEHOLDER(const)
-TENSORMAPPLACEHOLDER()
-#undef TENSORMAPPLACEHOLDER
-
-/// \brief specialisation of the PlaceHolder node for TensorForcedEvalOp. The
-/// TensorForcedEvalOp acts as a leaf node for its parent node.
-#define TENSORFORCEDEVALPLACEHOLDER(CVQual)\
-template <typename Expression, size_t N>\
-struct PlaceHolder<CVQual TensorForcedEvalOp<Expression>, N> {\
- static const size_t I = N;\
- typedef CVQual TensorForcedEvalOp<Expression> Type;\
- typedef typename Type::Nested Nested;\
- typedef typename Type::StorageKind StorageKind;\
- typedef typename Type::Index Index;\
- typedef typename Type::Scalar Scalar;\
- typedef typename Type::Packet Packet;\
- typedef typename Type::RealScalar RealScalar;\
- typedef typename Type::CoeffReturnType CoeffReturnType;\
- typedef typename Type::PacketReturnType PacketReturnType;\
-};
-
-TENSORFORCEDEVALPLACEHOLDER(const)
-TENSORFORCEDEVALPLACEHOLDER()
-#undef TENSORFORCEDEVALPLACEHOLDER
-
-template <typename PlainObjectType, int Options_, template <class> class Makepointer_, size_t N>
-struct traits<PlaceHolder<const TensorMap<PlainObjectType, Options_, Makepointer_>, N> >: public traits<PlainObjectType> {
- typedef traits<PlainObjectType> BaseTraits;
- typedef typename BaseTraits::Scalar Scalar;
- typedef typename BaseTraits::StorageKind StorageKind;
- typedef typename BaseTraits::Index Index;
- static const int NumDimensions = BaseTraits::NumDimensions;
- static const int Layout = BaseTraits::Layout;
- enum {
- Options = Options_,
- Flags = BaseTraits::Flags,
- };
-};
-
-template <typename PlainObjectType, int Options_, template <class> class Makepointer_, size_t N>
-struct traits<PlaceHolder<TensorMap<PlainObjectType, Options_, Makepointer_>, N> >
-: traits<PlaceHolder<const TensorMap<PlainObjectType, Options_, Makepointer_>, N> > {};
-
-} // end namespace internal
-} // end namespoace Eigen
-
-#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_PLACEHOLDER_HPP
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
index f456c35aa..d4c250c6d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolderExpr.h
@@ -25,6 +25,17 @@
namespace Eigen {
namespace TensorSycl {
namespace internal {
+
+/// \struct PlaceHolder
+/// \brief PlaceHolder is used to replace the \ref TensorMap in the expression
+/// tree.
+/// PlaceHolder contains the order of the leaf node in the expression tree.
+template <typename Scalar, size_t N>
+struct PlaceHolder {
+ static constexpr size_t I = N;
+ typedef Scalar Type;
+};
+
/// \struct PlaceHolderExpression
/// \brief it is used to create the PlaceHolder expression. The PlaceHolder
/// expression is a copy of expression type in which the TensorMap of the has
@@ -113,7 +124,7 @@ ASSIGNEXPR()
#define TENSORMAPEXPR(CVQual)\
template <typename Scalar_, int Options_, int Options2_, int NumIndices_, typename IndexType_, template <class> class MakePointer_, size_t N>\
struct PlaceHolderExpression< CVQual TensorMap< Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakePointer_>, N> {\
- typedef CVQual Eigen::internal::PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakePointer_>, N> Type;\
+ typedef CVQual PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakePointer_>, N> Type;\
};
TENSORMAPEXPR(const)
@@ -125,7 +136,7 @@ TENSORMAPEXPR()
#define FORCEDEVAL(CVQual)\
template <typename Expr, size_t N>\
struct PlaceHolderExpression<CVQual TensorForcedEvalOp<Expr>, N> {\
- typedef CVQual Eigen::internal::PlaceHolder<CVQual TensorForcedEvalOp<Expr>, N> Type;\
+ typedef CVQual PlaceHolder<CVQual TensorForcedEvalOp<Expr>, N> Type;\
};
FORCEDEVAL(const)
@@ -144,6 +155,18 @@ EVALTO(const)
EVALTO()
#undef EVALTO
+
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorReductionOp
+#define SYCLREDUCTION(CVQual)\
+template <typename OP, typename Dims, typename Expr, size_t N>\
+struct PlaceHolderExpression<CVQual TensorReductionOp<OP, Dims, Expr>, N>{\
+ typedef CVQual PlaceHolder<CVQual TensorReductionOp<OP, Dims, Expr>, N> Type;\
+};
+SYCLREDUCTION(const)
+SYCLREDUCTION()
+#undef SYCLREDUCTION
+
/// template deduction for \ref PlaceHolderExpression struct
template <typename Expr>
struct createPlaceHolderExpression {
@@ -151,8 +174,8 @@ struct createPlaceHolderExpression {
typedef typename PlaceHolderExpression<Expr, TotalLeaves - 1>::Type Type;
};
-}
-}
+} // namespace internal
+} // namespace TensorSycl
} // namespace Eigen
#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_PLACEHOLDER_EXPR_HPP
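
With TensorSyclPlaceHolder.h deleted, the PlaceHolder struct now lives in this header, and its index I is the contract between host and device: the host numbers the leaves (from TotalLeaves - 1 downwards) when it builds the tuple of accessors, and the device uses the same index to fetch each accessor back. A hypothetical sketch of that contract, with std::tuple standing in for the internal tuple utility:

// Sketch only: std::tuple and get_leaf stand in for utility::tuple and the
// accessor lookup performed on the device.
#include <cassert>
#include <cstddef>
#include <tuple>

template <typename Scalar, size_t N>
struct PlaceHolder { static const size_t I = N; typedef Scalar Type; };

template <typename PH, typename Tuple>
typename std::tuple_element<PH::I, Tuple>::type get_leaf(const Tuple &t) {
  return std::get<PH::I>(t);  // same index the host used when packing the tuple
}

int main() {
  std::tuple<int, float, double> accessors(7, 2.5f, 9.0);  // stand-ins for SYCL accessors
  assert(get_leaf<PlaceHolder<float, 1> >(accessors) == 2.5f);
  return 0;
}
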
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
index 57f2dda26..7914b6fad 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
@@ -37,20 +37,20 @@ void run(Expr &expr, Dev &dev) {
typedef typename internal::createPlaceHolderExpression<Expr>::Type PlaceHolderExpr;
auto functors = internal::extractFunctors(evaluator);
+ size_t tileSize = dev.m_queue.get_device().template get_info<cl::sycl::info::device::max_work_group_size>() / 2;
dev.m_queue.submit([&](cl::sycl::handler &cgh) {
// create a tuple of accessors from Evaluator
auto tuple_of_accessors = internal::createTupleOfAccessors<decltype(evaluator)>(cgh, evaluator);
const auto range = utility::tuple::get<0>(tuple_of_accessors).get_range()[0];
-
- size_t outTileSize = range;
- if (range > 64) outTileSize = 64;
- size_t yMode = range % outTileSize;
- int yRange = static_cast<int>(range);
- if (yMode != 0) yRange += (outTileSize - yMode);
-
+ size_t GRange = range;
+ if (tileSize > GRange) tileSize = GRange;
+ else if (GRange > tileSize) {
+   size_t xMode = GRange % tileSize;
+   if (xMode != 0) GRange += (tileSize - xMode);
+ }
// run the kernel
- cgh.parallel_for<PlaceHolderExpr>( cl::sycl::nd_range<1>(cl::sycl::range<1>(yRange), cl::sycl::range<1>(outTileSize)), [=](cl::sycl::nd_item<1> itemID) {
+ cgh.parallel_for<PlaceHolderExpr>(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
typedef typename internal::ConvertToDeviceExpression<Expr>::Type DevExpr;
auto device_expr =internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
auto device_evaluator = Eigen::TensorEvaluator<decltype(device_expr.expr), Eigen::DefaultDevice>(device_expr.expr, Eigen::DefaultDevice());
@@ -61,6 +61,7 @@ void run(Expr &expr, Dev &dev) {
});
dev.m_queue.throw_asynchronous();
}
+
evaluator.cleanup();
}
} // namespace TensorSycl
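
The launch computation above replaces the old fixed 64-thread tile: the tile size starts at half the device's reported max_work_group_size, is clamped when the problem is smaller, and the global range is rounded up to the next multiple of the tile so the nd_range divides evenly. The same arithmetic in a standalone sketch:

// Sketch of the launch-size arithmetic in run() above, outside of SYCL.
#include <cassert>
#include <cstddef>

void launch_geometry(size_t range, size_t max_work_group, size_t &GRange, size_t &tileSize) {
  tileSize = max_work_group / 2;  // starting tile, as queried in run()
  GRange = range;
  if (tileSize > GRange) {
    tileSize = GRange;  // small problem: a single work-group covers it
  } else {
    size_t xMode = GRange % tileSize;
    if (xMode != 0) GRange += tileSize - xMode;  // ensure GRange % tileSize == 0
  }
}

int main() {
  size_t g = 0, t = 0;
  launch_geometry(1000, 256, g, t);
  assert(t == 128 && g == 1024);  // 1000 rounds up to 8 * 128
  return 0;
}
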
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index aeaea162a..b5fa1c845 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -1,6 +1,6 @@
# generate split test header file only if it does not yet exist
# in order to prevent a rebuild every time cmake is configured
-if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
+if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
foreach(i RANGE 1 999)
file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h
@@ -16,11 +16,11 @@ endif()
set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Unsupported")
add_custom_target(BuildUnsupported)
-include_directories(../../test ../../unsupported ../../Eigen
+include_directories(../../test ../../unsupported ../../Eigen
${CMAKE_CURRENT_BINARY_DIR}/../../test)
find_package (Threads)
-
+
find_package(GoogleHash)
if(GOOGLEHASH_FOUND)
add_definitions("-DEIGEN_GOOGLEHASH_SUPPORT")
@@ -134,7 +134,7 @@ ei_add_test(cxx11_tensor_roundings)
ei_add_test(cxx11_tensor_layout_swap)
ei_add_test(cxx11_tensor_io)
if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
- # This test requires __uint128_t which is only available on 64bit systems
+ # This test requires __uint128_t which is only available on 64bit systems
ei_add_test(cxx11_tensor_uint128)
endif()
endif()
@@ -145,6 +145,7 @@ if(EIGEN_TEST_CXX11)
ei_add_test_sycl(cxx11_tensor_forced_eval_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_broadcast_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_device_sycl "-std=c++11")
+ ei_add_test_sycl(cxx11_tensor_reduction_sycl "-std=c++11")
endif(EIGEN_TEST_SYCL)
# It should be safe to always run these tests as there is some fallback code for
# older compiler that don't support cxx11.
diff --git a/unsupported/test/cxx11_tensor_reduction_sycl.cpp b/unsupported/test/cxx11_tensor_reduction_sycl.cpp
new file mode 100644
index 000000000..bd09744a6
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_reduction_sycl.cpp
@@ -0,0 +1,147 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015
+// Mehdi Goli Codeplay Software Ltd.
+// Ralph Potter Codeplay Software Ltd.
+// Luke Iwanski Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_reduction_sycl
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+static void test_full_reductions_sycl() {
+ cl::sycl::gpu_selector s;
+ cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
+ for (const auto& e : l) {
+ try {
+ std::rethrow_exception(e);
+ } catch (const cl::sycl::exception& e) {
+ std::cout << e.what() << std::endl;
+ }
+ }
+ });
+ Eigen::SyclDevice sycl_device(q);
+
+ const int num_rows = 452;
+ const int num_cols = 765;
+ array<int, 2> tensorRange = {{num_rows, num_cols}};
+
+ Tensor<float, 2> in(tensorRange);
+ in.setRandom();
+
+ Tensor<float, 0> full_redux = in.sum();
+ float* out_data = (float*)sycl_device.allocate(sizeof(float));
+ TensorMap<Tensor<float, 2> > in_gpu(in.data(), tensorRange);
+ TensorMap<Tensor<float, 0> > full_redux_gpu(out_data);
+ full_redux_gpu.device(sycl_device) = in_gpu.sum();
+ sycl_device.deallocate(out_data);
+ // Check that the CPU and GPU reductions return the same result.
+ VERIFY_IS_APPROX(full_redux_gpu(), full_redux());
+}
+
+static void test_first_dim_reductions_sycl() {
+ cl::sycl::gpu_selector s;
+ cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
+ for (const auto& e : l) {
+ try {
+ std::rethrow_exception(e);
+ } catch (const cl::sycl::exception& e) {
+ std::cout << e.what() << std::endl;
+ }
+ }
+ });
+ Eigen::SyclDevice sycl_device(q);
+
+ int dim_x = 145;
+ int dim_y = 1;
+ int dim_z = 67;
+
+ array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}};
+
+ Tensor<float, 3> in(tensorRange);
+ in.setRandom();
+ Eigen::array<int, 1> red_axis;
+ red_axis[0] = 0;
+ Tensor<float, 2> redux = in.sum(red_axis);
+ TensorMap<Tensor<float, 3> > in_gpu(in.data(), tensorRange);
+ float* out_data = (float*)sycl_device.allocate(dim_y*dim_z*sizeof(float));
+ TensorMap<Tensor<float, 2> > redux_gpu(out_data, dim_y, dim_z );
+ redux_gpu.device(sycl_device) = in_gpu.sum(red_axis);
+
+ sycl_device.deallocate(out_data);
+ // Check that the CPU and GPU reductions return the same result.
+ for(int j=0; j<dim_y; j++ )
+ for(int k=0; k<dim_z; k++ )
+ VERIFY_IS_APPROX(redux_gpu(j,k), redux(j,k));
+}
+
+static void test_last_dim_reductions_sycl() {
+ cl::sycl::gpu_selector s;
+ cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
+ for (const auto& e : l) {
+ try {
+ std::rethrow_exception(e);
+ } catch (const cl::sycl::exception& e) {
+ std::cout << e.what() << std::endl;
+ }
+ }
+ });
+ Eigen::SyclDevice sycl_device(q);
+
+ int dim_x = 567;
+ int dim_y = 1;
+ int dim_z = 47;
+
+ array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}};
+
+ Tensor<float, 3> in(tensorRange);
+ in.setRandom();
+ Eigen::array<int, 1> red_axis;
+ red_axis[0] = 2;
+ Tensor<float, 2> redux = in.sum(red_axis);
+ TensorMap<Tensor<float, 3> > in_gpu(in.data(), tensorRange);
+ float* out_data = (float*)sycl_device.allocate(dim_x*dim_y*sizeof(float));
+ TensorMap<Tensor<float, 2> > redux_gpu(out_data, dim_x, dim_y );
+ redux_gpu.device(sycl_device) = in_gpu.sum(red_axis);
+
+ sycl_device.deallocate(out_data);
+ // Check that the CPU and GPU reductions return the same result.
+ for(int j=0; j<dim_x; j++ )
+ for(int k=0; k<dim_y; k++ )
+ VERIFY_IS_APPROX(redux_gpu(j,k), redux(j,k));
+}
+
+void test_cxx11_tensor_reduction_sycl() {
+ CALL_SUBTEST((test_full_reductions_sycl()));
+ CALL_SUBTEST((test_first_dim_reductions_sycl()));
+ CALL_SUBTEST((test_last_dim_reductions_sycl()));
+}
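
All three tests follow the same user-facing pattern: compute a reference reduction on the host, evaluate the identical expression through Eigen::SyclDevice, and compare the results. A condensed sketch of that pattern, with the device construction and verification elided and the helper name hypothetical:

// Sketch only: reduce_and_compare is not part of the test suite.
#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>

static void reduce_and_compare(Eigen::SyclDevice &sycl_device) {
  Eigen::Tensor<float, 2> in(100, 100);
  in.setRandom();
  Eigen::Tensor<float, 0> host_result = in.sum();  // reference reduction on the CPU

  float *out = static_cast<float *>(sycl_device.allocate(sizeof(float)));
  Eigen::TensorMap<Eigen::Tensor<float, 2> > in_gpu(in.data(), 100, 100);
  Eigen::TensorMap<Eigen::Tensor<float, 0> > out_gpu(out);
  out_gpu.device(sycl_device) = in_gpu.sum();  // same reduction via the SYCL backend
  sycl_device.deallocate(out);                 // the tests deallocate before comparing
  // out_gpu() and host_result() should agree to VERIFY_IS_APPROX tolerance.
}
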