-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h          | 266
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h       |  40
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h |  19
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h |   5
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h |   2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h             |   8
-rw-r--r--  unsupported/test/cxx11_tensor_broadcast_sycl.cpp               | 100
-rw-r--r--  unsupported/test/cxx11_tensor_builtins_sycl.cpp                |   6
-rw-r--r--  unsupported/test/cxx11_tensor_device_sycl.cpp                  |  65
-rw-r--r--  unsupported/test/cxx11_tensor_forced_eval_sycl.cpp             |  47
-rw-r--r--  unsupported/test/cxx11_tensor_morphing_sycl.cpp                |  43
-rw-r--r--  unsupported/test/cxx11_tensor_reduction_sycl.cpp               |  83
-rw-r--r--  unsupported/test/cxx11_tensor_sycl.cpp                         | 105
13 files changed, 435 insertions(+), 354 deletions(-)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
index fe8452d79..d6d127153 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
@@ -12,37 +12,34 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#include <iostream>
-
#if defined(EIGEN_USE_SYCL) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H)
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
namespace Eigen {
-struct SyclDevice {
- /// class members:
- bool exception_caught_ = false;
-
- /// sycl queue
- mutable cl::sycl::queue m_queue;
+#define ConvertToActualTypeSycl(T, buf_acc) reinterpret_cast<typename cl::sycl::global_ptr<T>::pointer_t>((&(*buf_acc.get_pointer())))
+
+struct QueueInterface {
+ /// class members:
/// std::map is the container used to make sure that we create only one buffer
/// per pointer. The lifespan of the buffer now depends on the lifespan of SyclDevice.
/// If a non-read-only pointer is needed to be accessed on the host we should manually deallocate it.
- mutable std::map<const void *, std::shared_ptr<void>> buffer_map;
-
+ mutable std::map<const uint8_t *, cl::sycl::buffer<uint8_t, 1>> buffer_map;
+ /// sycl queue
+ mutable cl::sycl::queue m_queue;
/// creating device by using selector
- template<typename dev_Selector> explicit SyclDevice(dev_Selector s):
+ /// SyclStreamDevice is not owned. it is the caller's responsibility to destroy it.
+ template<typename dev_Selector> explicit QueueInterface(dev_Selector s):
#ifdef EIGEN_EXCEPTIONS
m_queue(cl::sycl::queue(s, [=](cl::sycl::exception_list l) {
for (const auto& e : l) {
try {
- if (e) {
- exception_caught_ = true;
+ if(e){
std::rethrow_exception(e);
}
- } catch (const cl::sycl::exception& e) {
- std::cerr << e.what() << std::endl;
- }
+ } catch (cl::sycl::exception e) {
+ std::cerr << e.what() << std::endl;
+ }
}
}))
#else
@@ -50,63 +47,92 @@ struct SyclDevice {
#endif
{}
- // destructor
- ~SyclDevice() { deallocate_all(); }
+ /// Allocating device pointer. This pointer is actually an 8 bytes host pointer used as key to access the sycl device buffer.
+ /// The reason is that we cannot use device buffer as a pointer as a m_data in Eigen leafNode expressions. So we create a key
+ /// pointer to be used in Eigen expression construction. When we convert the Eigen construction into the sycl construction we
+ /// use this pointer as a key in our buffer_map and we make sure that we dedicate only one buffer only for this pointer.
+ /// The device pointer would be deleted by calling deallocate function.
+ EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
+ auto buf = cl::sycl::buffer<uint8_t,1>(cl::sycl::range<1>(num_bytes));
+ auto ptr =buf.get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>().get_pointer();
+ buf.set_final_data(nullptr);
+ buffer_map.insert(std::pair<const uint8_t *, cl::sycl::buffer<uint8_t, 1>>(ptr,buf));
+ return static_cast<void*>(ptr);
+ }
/// This is used to deallocate the device pointer. p is used as a key inside
/// the map to find the device buffer and delete it.
- template <typename T> EIGEN_STRONG_INLINE void deallocate(T *p) const {
- auto it = buffer_map.find(p);
+ EIGEN_STRONG_INLINE void deallocate(const void *p) const {
+ auto it = buffer_map.find(static_cast<const uint8_t*>(p));
if (it != buffer_map.end()) {
buffer_map.erase(it);
- internal::aligned_free(p);
}
}
- /// This is called by the SyclDevice destructor to release all allocated memory if the user didn't already do so.
- /// We also free the host pointer that we have dedicated as a key to accessing the device buffer.
- EIGEN_STRONG_INLINE void deallocate_all() const {
- std::map<const void *, std::shared_ptr<void>>::iterator it=buffer_map.begin();
- while (it!=buffer_map.end()) {
- auto p=it->first;
- buffer_map.erase(it);
- internal::aligned_free(const_cast<void*>(p));
- it=buffer_map.begin();
+ EIGEN_STRONG_INLINE std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1>>::iterator find_buffer(const void* ptr) const {
+ auto it1 = buffer_map.find(static_cast<const uint8_t*>(ptr));
+ if (it1 != buffer_map.end()){
+ return it1;
+ }
+ else{
+ for(std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1>>::iterator it=buffer_map.begin(); it!=buffer_map.end(); ++it){
+ auto size = it->second.get_size();
+ if((it->first < (static_cast<const uint8_t*>(ptr))) && ((static_cast<const uint8_t*>(ptr)) < (it->first + size)) ) return it;
+ }
+ }
+ //eigen_assert("No sycl buffer found. Make sure that you have allocated memory for your buffer by calling allocate function in SyclDevice");
+ std::cerr << "No sycl buffer found. Make sure that you have allocated memory for your buffer by calling allocate function in SyclDevice"<< std::endl;
+ abort();
+ //return buffer_map.end();
+ }
+
+ // destructor
+ ~QueueInterface() { buffer_map.clear(); }
+};
+
+template <typename T> class MemCopyFunctor {
+ public:
+ typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer> read_accessor;
+ typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer> write_accessor;
+ MemCopyFunctor(read_accessor src_acc, write_accessor dst_acc, size_t rng, size_t i, size_t offset): m_src_acc(src_acc), m_dst_acc(dst_acc), m_rng(rng), m_i(i), m_offset(offset) {}
+ void operator()(cl::sycl::nd_item<1> itemID) {
+ auto src_ptr = ConvertToActualTypeSycl(T, m_src_acc);
+ auto dst_ptr = ConvertToActualTypeSycl(T, m_dst_acc);
+ auto globalid = itemID.get_global_linear_id();
+ if (globalid< m_rng) {
+ dst_ptr[globalid + m_i] = src_ptr[globalid + m_offset];
}
- buffer_map.clear();
}
+ private:
+ read_accessor m_src_acc;
+ write_accessor m_dst_acc;
+ size_t m_rng;
+ size_t m_i;
+ size_t m_offset;
+};
+
+struct SyclDevice {
+ // class member.
+ QueueInterface* m_queu_stream;
+ /// QueueInterface is not owned. it is the caller's responsibility to destroy it.
+ explicit SyclDevice(QueueInterface* queu_stream):m_queu_stream(queu_stream){}
/// Creation of sycl accessor for a buffer. This function first tries to find
/// the buffer in the buffer_map. If found it gets the accessor from it, if not,
/// the function then adds an entry by creating a sycl buffer for that particular pointer.
- template <cl::sycl::access::mode AcMd, typename T> EIGEN_STRONG_INLINE cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
- get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const T * ptr) const {
- return (get_sycl_buffer<T>(num_bytes, ptr)->template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
- }
-
- /// Inserting a new sycl buffer. For every allocated device pointer only one buffer would be created. The buffer type is a device- only buffer.
- /// The key pointer used to access the device buffer(the device pointer(ptr) ) must be initialised by the allocate function.
- template<typename T> EIGEN_STRONG_INLINE std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> add_sycl_buffer(size_t num_bytes, const T *ptr) const {
- using Type = cl::sycl::buffer<T, 1>;
- std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> ret;
- if(ptr!=nullptr){
- ret= buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(new Type(cl::sycl::range<1>(num_bytes)),
- [](void *dataMem) { delete static_cast<Type*>(dataMem); })));
- (static_cast<Type*>(ret.first->second.get()))->set_final_data(nullptr);
- } else {
- eigen_assert("The device memory is not allocated. Please call allocate on the device!!");
- }
- return ret;
+ template <cl::sycl::access::mode AcMd> EIGEN_STRONG_INLINE cl::sycl::accessor<uint8_t, 1, AcMd, cl::sycl::access::target::global_buffer>
+ get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const void* ptr) const {
+ return (get_sycl_buffer(num_bytes, ptr).template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
}
/// Accessing the created sycl device buffer for the device pointer
- template <typename T> EIGEN_STRONG_INLINE cl::sycl::buffer<T, 1>* get_sycl_buffer(size_t num_bytes,const T * ptr) const {
- return static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(num_bytes, ptr).first->second.get());
+ EIGEN_STRONG_INLINE cl::sycl::buffer<uint8_t, 1>& get_sycl_buffer(size_t , const void * ptr) const {
+ return m_queu_stream->find_buffer(ptr)->second;
}
/// This is used to prepare the number of threads and also the number of threads per block for sycl kernels
EIGEN_STRONG_INLINE void parallel_for_setup(size_t n, size_t &tileSize, size_t &rng, size_t &GRange) const {
- tileSize =m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
+ tileSize =sycl_queue().get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
rng = n;
if (rng==0) rng=1;
GRange=rng;
@@ -116,57 +142,35 @@ struct SyclDevice {
if (xMode != 0) GRange += (tileSize - xMode);
}
}
-
- /// Allocating device pointer. This pointer is actually an 8 bytes host pointer used as key to access the sycl device buffer.
- /// The reason is that we cannot use device buffer as a pointer as a m_data in Eigen leafNode expressions. So we create a key
- /// pointer to be used in Eigen expression construction. When we convert the Eigen construction into the sycl construction we
- /// use this pointer as a key in our buffer_map and we make sure that we dedicate only one buffer only for this pointer.
- /// The device pointer would be deleted by calling deallocate function.
- EIGEN_STRONG_INLINE void *allocate(size_t) const {
- return internal::aligned_malloc(8);
+ /// allocate device memory
+ EIGEN_STRONG_INLINE void *allocate(size_t num_bytes) const {
+ return m_queu_stream->allocate(num_bytes);
}
+ /// deallocate device memory
+ EIGEN_STRONG_INLINE void deallocate(const void *p) const {
+ m_queu_stream->deallocate(p);
+ }
// some runtime conditions that can be applied here
EIGEN_STRONG_INLINE bool isDeviceSuitable() const { return true; }
- template <typename T> EIGEN_STRONG_INLINE std::map<const void *, std::shared_ptr<void>>::iterator find_nearest(const T* ptr) const {
- auto it1 = buffer_map.find(ptr);
- if (it1 != buffer_map.end()){
- return it1;
- }
- else{
- for(std::map<const void *, std::shared_ptr<void>>::iterator it=buffer_map.begin(); it!=buffer_map.end(); ++it){
- auto size = ((cl::sycl::buffer<T, 1>*)it->second.get())->get_size();
- if((static_cast<const T*>(it->first) < ptr) && (ptr < (static_cast<const T*>(it->first)) + size)) return it;
- }
- }
- return buffer_map.end();
- }
/// the memcpy function
template<typename T> EIGEN_STRONG_INLINE void memcpy(void *dst, const T *src, size_t n) const {
- auto it1 = find_nearest(src);
- auto it2 = find_nearest(static_cast<T*>(dst));
- if ((it1 != buffer_map.end()) && (it2!=buffer_map.end())) {
- auto offset= (src - (static_cast<const T*>(it1->first)));
- auto i= ((static_cast<T*>(dst)) - const_cast<T*>((static_cast<const T*>(it2->first))));
- size_t rng, GRange, tileSize;
- parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
- m_queue.submit([&](cl::sycl::handler &cgh) {
- auto src_acc =((cl::sycl::buffer<T, 1>*)it1->second.get())-> template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
- auto dst_acc =((cl::sycl::buffer<T, 1>*)it2->second.get())-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
- typedef decltype(src_acc) DevToDev;
- cgh.parallel_for<DevToDev>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
- auto globalid=itemID.get_global_linear_id();
- if (globalid< rng) {
- dst_acc[globalid+i ]=src_acc[globalid+offset];
- }
- });
- });
- m_queue.throw_asynchronous();
- } else {
- eigen_assert("no source or destination device memory found.");
- }
+ auto it1 = m_queu_stream->find_buffer((void*)src);
+ auto it2 = m_queu_stream->find_buffer(dst);
+ auto offset= (static_cast<const uint8_t*>(static_cast<const void*>(src))) - it1->first;
+ auto i= (static_cast<const uint8_t*>(dst)) - it2->first;
+ offset/=sizeof(T);
+ i/=sizeof(T);
+ size_t rng, GRange, tileSize;
+ parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
+ sycl_queue().submit([&](cl::sycl::handler &cgh) {
+ auto src_acc =it1->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
+ auto dst_acc =it2->second.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
+ cgh.parallel_for(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<T>(src_acc, dst_acc, rng, 0, offset));
+ });
+ sycl_queue().throw_asynchronous();
}
/// The memcpyHostToDevice is used to copy the device only pointer to a host pointer. Using the device
@@ -175,8 +179,7 @@ struct SyclDevice {
/// buffer to host. Then we use the memcpy to copy the data to the host accessor. The first time that
/// this buffer is accessed, the data will be copied to the device.
template<typename T> EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
-
- auto host_acc= get_sycl_buffer(n, dst)-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
+ auto host_acc= get_sycl_buffer(n, dst). template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
::memcpy(host_acc.get_pointer(), src, n);
}
/// The memcpyDeviceToHost is used to copy the data from host to device. Here, in order to avoid double copying the data. We create a sycl
@@ -185,61 +188,44 @@ struct SyclDevice {
/// buffer with map_allocator on the gpu in parallel. At the end of the function call the destination buffer would be destroyed and the data
/// would be available on the dst pointer using fast copy technique (map_allocator). In this case we can make sure that we copy the data back
/// to the cpu only once per function call.
- template<typename T> EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
- auto it = find_nearest(src);
- auto offset = src- (static_cast<const T*>(it->first));
- if (it != buffer_map.end()) {
+ template<typename T> EIGEN_STRONG_INLINE void memcpyDeviceToHost(void *dst, const T *src, size_t n) const {
+ auto it = m_queu_stream->find_buffer(src);
+ auto offset =static_cast<const uint8_t*>(static_cast<const void*>(src))- it->first;
+ offset/=sizeof(T);
size_t rng, GRange, tileSize;
parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
// Assuming that the dst is the start of the destination pointer
- auto dest_buf = cl::sycl::buffer<T, 1, cl::sycl::map_allocator<T>>(dst, cl::sycl::range<1>(rng));
- typedef decltype(dest_buf) SYCLDTOH;
- m_queue.submit([&](cl::sycl::handler &cgh) {
- auto src_acc= (static_cast<cl::sycl::buffer<T, 1>*>(it->second.get()))-> template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
+ auto dest_buf = cl::sycl::buffer<uint8_t, 1, cl::sycl::map_allocator<uint8_t> >(static_cast<uint8_t*>(dst), cl::sycl::range<1>(rng*sizeof(T)));
+ sycl_queue().submit([&](cl::sycl::handler &cgh) {
+ auto src_acc= it->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
auto dst_acc =dest_buf.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
- cgh.parallel_for<SYCLDTOH>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
- auto globalid=itemID.get_global_linear_id();
- if (globalid< dst_acc.get_size()) {
- dst_acc[globalid] = src_acc[globalid + offset];
- }
- });
+ cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<T>(src_acc, dst_acc, rng, 0, offset));
});
- m_queue.throw_asynchronous();
-
- } else {
- eigen_assert("no device memory found. The memory might be destroyed before creation");
- }
+ sycl_queue().throw_asynchronous();
}
-
+ /// returning the sycl queue
+ EIGEN_STRONG_INLINE cl::sycl::queue& sycl_queue() const { return m_queu_stream->m_queue;}
/// Here is the implementation of memset function on sycl.
template<typename T> EIGEN_STRONG_INLINE void memset(T *buff, int c, size_t n) const {
- size_t rng, GRange, tileSize;
- parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
- m_queue.submit([&](cl::sycl::handler &cgh) {
- auto buf_acc =get_sycl_buffer(n, buff)-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
- cgh.parallel_for<SyclDevice>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
- auto globalid=itemID.get_global_linear_id();
- auto buf_ptr= reinterpret_cast<typename cl::sycl::global_ptr<unsigned char>::pointer_t>((&(*buf_acc.get_pointer())));
- if (globalid< buf_acc.get_size()) {
- for(size_t i=0; i<sizeof(T); i++)
- buf_ptr[globalid*sizeof(T) + i] = c;
- }
- });
+ size_t rng, GRange, tileSize;
+ parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
+ sycl_queue().submit([&](cl::sycl::handler &cgh) {
+ auto buf_acc =get_sycl_buffer(n, static_cast<uint8_t*>(static_cast<void*>(buff))). template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
+ cgh.parallel_for<SyclDevice>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
+ auto globalid=itemID.get_global_linear_id();
+ if (globalid< buf_acc.get_size()) {
+ for(size_t i=0; i<sizeof(T); i++)
+ buf_acc[globalid*sizeof(T) + i] = c;
+ }
});
- m_queue.throw_asynchronous();
+ });
+ sycl_queue().throw_asynchronous();
}
/// No need for sycl it should act the same as CPU version
- EIGEN_STRONG_INLINE int majorDeviceVersion() const {
- return 1;
- }
+ EIGEN_STRONG_INLINE int majorDeviceVersion() const { return 1; }
+ /// There is no need to synchronise the buffer in sycl as it is automatically handled by sycl runtime scheduler.
EIGEN_STRONG_INLINE void synchronize() const {
- m_queue.wait_and_throw();
- }
-
- // This function checks if the runtime recorded an error for the
- // underlying stream device.
- EIGEN_STRONG_INLINE bool ok() const {
- return !exception_caught_;
+ sycl_queue().wait_and_throw();
}
};
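
The main change in this file is that queue and buffer ownership move out of SyclDevice into the new QueueInterface, and every allocation becomes an untyped cl::sycl::buffer<uint8_t, 1> keyed by a host pointer. A minimal usage sketch of the new interface, modelled on the updated tests later in this diff (the selector and sizes are illustrative only):

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>

void sycl_device_usage_sketch() {
  cl::sycl::gpu_selector selector;                 // any SYCL device selector
  Eigen::QueueInterface queueInterface(selector);  // owns the queue and the buffer_map
  Eigen::SyclDevice sycl_device(&queueInterface);  // non-owning wrapper used by the evaluators

  // allocate() returns a host pointer that is only a key into buffer_map;
  // it must not be dereferenced on the host.
  float* gpu_data = static_cast<float*>(sycl_device.allocate(100 * sizeof(float)));

  float host_in[100] = {0};
  float host_out[100] = {0};
  sycl_device.memcpyHostToDevice(gpu_data, host_in, 100 * sizeof(float));
  sycl_device.memcpyDeviceToHost(host_out, gpu_data, 100 * sizeof(float));

  sycl_device.deallocate(gpu_data);
}  // queueInterface releases its buffers when it goes out of scope
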
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
index db23bd7b0..f293869ee 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
@@ -27,7 +27,7 @@ namespace internal {
template<typename CoeffReturnType, typename KernelName> struct syclGenericBufferReducer{
template<typename BufferTOut, typename BufferTIn>
-static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
+static void run(BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
do {
auto f = [length, local, bufOut, &bufI](cl::sycl::handler& h) mutable {
cl::sycl::nd_range<1> r{cl::sycl::range<1>{std::max(length, local)},
@@ -37,7 +37,7 @@ static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& de
auto aI =
bufI.template get_access<cl::sycl::access::mode::read_write>(h);
auto aOut =
- bufOut->template get_access<cl::sycl::access::mode::discard_write>(h);
+ bufOut.template get_access<cl::sycl::access::mode::discard_write>(h);
cl::sycl::accessor<CoeffReturnType, 1, cl::sycl::access::mode::read_write,
cl::sycl::access::target::local>
scratch(cl::sycl::range<1>(local), h);
@@ -61,7 +61,7 @@ static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& de
/* Apply the reduction operation between the current local
* id and the one on the other half of the vector. */
if (globalid < length) {
- int min = (length < local) ? length : local;
+ auto min = (length < local) ? length : local;
for (size_t offset = min / 2; offset > 0; offset /= 2) {
if (localid < offset) {
scratch[localid] += scratch[localid + offset];
@@ -72,14 +72,15 @@ static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& de
if (localid == 0) {
aI[id.get_group(0)] = scratch[localid];
if((length<=local) && globalid ==0){
- aOut[globalid]=scratch[localid];
+ auto aOutPtr = ConvertToActualTypeSycl(CoeffReturnType, aOut);
+ aOutPtr[0]=scratch[0];
}
}
}
});
};
- dev.m_queue.submit(f);
- dev.m_queue.throw_asynchronous();
+ dev.sycl_queue().submit(f);
+ dev.sycl_queue().throw_asynchronous();
/* At this point, you could queue::wait_and_throw() to ensure that
* errors are caught quickly. However, this would likely impact
@@ -116,7 +117,7 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
if(rng ==0) {
red_factor=1;
};
- size_t tileSize =dev.m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
+ size_t tileSize =dev.sycl_queue().get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
size_t GRange=std::max((size_t )1, rng);
 // convert global range to power of 2 for reduction
@@ -134,7 +135,9 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
/// if the shared memory is less than the GRange, we set shared_mem size to the TotalSize and in this case one kernel would be created for recursion to reduce all to one.
if (GRange < outTileSize) outTileSize=GRange;
// getting final out buffer at the moment the created buffer is true because there is no need for assign
- auto out_buffer =dev.template get_sycl_buffer<typename Eigen::internal::remove_all<CoeffReturnType>::type>(self.dimensions().TotalSize(), output);
+// auto out_buffer =dev.template get_sycl_buffer<typename Eigen::internal::remove_all<CoeffReturnType>::type>(self.dimensions().TotalSize(), output);
+ auto out_buffer =dev.get_sycl_buffer(self.dimensions().TotalSize(), output);
+
/// creating the shared memory for calculating reduction.
/// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can
/// recursively apply reduction on it in order to reduce the whole.
@@ -142,7 +145,7 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
typedef typename Eigen::internal::remove_all<decltype(self.xprDims())>::type Dims;
Dims dims= self.xprDims();
Op functor = reducer;
- dev.m_queue.submit([&](cl::sycl::handler &cgh) {
+ dev.sycl_queue().submit([&](cl::sycl::handler &cgh) {
// create a tuple of accessors from Evaluator
auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
auto tmp_global_accessor = temp_global_buffer. template get_access<cl::sycl::access::mode::read_write, cl::sycl::access::target::global_buffer>(cgh);
@@ -161,16 +164,16 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
auto globalid=itemID.get_global_linear_id();
if(globalid<rng)
- tmp_global_accessor.get_pointer()[globalid]=InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, red_factor*globalid, red_factor, const_cast<Op&>(functor));
+ tmp_global_accessor.get_pointer()[globalid]=InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, static_cast<typename DevExpr::Index>(red_factor*globalid), red_factor, const_cast<Op&>(functor));
else
tmp_global_accessor.get_pointer()[globalid]=static_cast<CoeffReturnType>(0);
if(remaining!=0 && globalid==0 )
 // this will add the rest of the input buffer when the input size is not divisible by red_factor.
- tmp_global_accessor.get_pointer()[globalid]+=InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, red_factor*(rng), remaining, const_cast<Op&>(functor));
+ tmp_global_accessor.get_pointer()[0]+=InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, static_cast<typename DevExpr::Index>(red_factor*(rng)), static_cast<typename DevExpr::Index>(remaining), const_cast<Op&>(functor));
});
});
- dev.m_queue.throw_asynchronous();
+ dev.sycl_queue().throw_asynchronous();
/// This is used to recursively reduce the tmp value to an element of 1;
syclGenericBufferReducer<CoeffReturnType,HostExpr>::run(out_buffer, temp_global_buffer,dev, GRange, outTileSize);
@@ -198,7 +201,7 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
Dims dims= self.xprDims();
Op functor = reducer;
- dev.m_queue.submit([&](cl::sycl::handler &cgh) {
+ dev.sycl_queue().submit([&](cl::sycl::handler &cgh) {
// create a tuple of accessors from Evaluator
auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
auto output_accessor = dev.template get_sycl_accessor<cl::sycl::access::mode::discard_write>(num_coeffs_to_preserve,cgh, output);
@@ -212,19 +215,20 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
const auto device_self_expr= TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, functor);
/// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is
/// the device_evaluator is detectable and recognisable on the device.
- typedef Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice> DeiceSelf;
+ typedef Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice> DeviceSelf;
auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
+ auto output_accessor_ptr =ConvertToActualTypeSycl(typename DeviceSelf::CoeffReturnType, output_accessor);
/// const cast added as a naive solution to solve the qualifier drop error
auto globalid=itemID.get_global_linear_id();
if (globalid< range) {
- typename DeiceSelf::CoeffReturnType accum = functor.initialize();
- GenericDimReducer<DeiceSelf::NumReducedDims-1, DeiceSelf, Op>::reduce(device_self_evaluator, device_self_evaluator.firstInput(globalid),const_cast<Op&>(functor), &accum);
+ typename DeviceSelf::CoeffReturnType accum = functor.initialize();
+ GenericDimReducer<DeviceSelf::NumReducedDims-1, DeviceSelf, Op>::reduce(device_self_evaluator, device_self_evaluator.firstInput(static_cast<typename DevExpr::Index>(globalid)),const_cast<Op&>(functor), &accum);
functor.finalize(accum);
- output_accessor.get_pointer()[globalid]= accum;
+ output_accessor_ptr[globalid]= accum;
}
});
});
- dev.m_queue.throw_asynchronous();
+ dev.sycl_queue().throw_asynchronous();
return false;
}
};
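
Both reducers above now take a plain uint8_t accessor for the output buffer and convert it to the element type inside the kernel with the ConvertToActualTypeSycl macro introduced in TensorDeviceSycl.h. A sketch of what that conversion amounts to (the helper name is illustrative, not part of the patch):

#include <CL/sycl.hpp>

// ConvertToActualTypeSycl(T, acc) expands to a reinterpret_cast of the
// accessor's global pointer, roughly this helper:
template <typename T, typename ByteAccessor>
typename cl::sycl::global_ptr<T>::pointer_t as_typed_pointer(ByteAccessor acc) {
  return reinterpret_cast<typename cl::sycl::global_ptr<T>::pointer_t>(
      &(*acc.get_pointer()));
}

// Inside the reduction kernel the result is then written through the typed pointer:
//   auto out_ptr = as_typed_pointer<CoeffReturnType>(output_accessor);
//   out_ptr[globalid] = accum;
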
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
index c3152513c..d7551d94f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
@@ -30,7 +30,8 @@ namespace internal {
template <typename PtrType, size_t N, typename... Params>
struct EvalToLHSConstructor {
PtrType expr;
- EvalToLHSConstructor(const utility::tuple::Tuple<Params...> &t): expr((&(*(utility::tuple::get<N>(t).get_pointer())))) {}
+ EvalToLHSConstructor(const utility::tuple::Tuple<Params...> &t) : expr(ConvertToActualTypeSycl(typename Eigen::internal::remove_all<PtrType>::type, utility::tuple::get<N>(t))) {}
+ //EvalToLHSConstructor(const utility::tuple::Tuple<Params...> &t): expr((&(*(utility::tuple::get<N>(t).get_pointer())))) {}
};
/// \struct ExprConstructor is used to reconstruct the expression on the device and
@@ -53,9 +54,11 @@ CVQual PlaceHolder<CVQual TensorMap<T, Options3_, MakePointer_>, N>, Params...>{
Type expr;\
template <typename FuncDetector>\
ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
- : expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}\
+ : expr(Type(ConvertToActualTypeSycl(typename Type::Scalar, utility::tuple::get<N>(t)), fd.dimensions())){}\
};
+//: expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}
+
TENSORMAP(const)
TENSORMAP()
@@ -163,7 +166,7 @@ struct ExprConstructor<CVQual TensorAssignOp<OrigLHSExpr, OrigRHSExpr>, CVQual
ASSIGN()
#undef ASSIGN
/// specialisation of the \ref ExprConstructor struct when the node type is
-/// TensorEvalToOp
+/// TensorEvalToOp /// 0 here is the output number in the buffer
#define EVALTO(CVQual)\
template <typename OrigExpr, typename Expr, typename... Params>\
struct ExprConstructor<CVQual TensorEvalToOp<OrigExpr, MakeGlobalPointer>, CVQual TensorEvalToOp<Expr>, Params...> {\
@@ -189,12 +192,13 @@ template <typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
struct ExprConstructor<CVQual TensorForcedEvalOp<OrigExpr, MakeGlobalPointer>,\
CVQual PlaceHolder<CVQual TensorForcedEvalOp<DevExpr>, N>, Params...> {\
typedef CVQual TensorMap<Tensor<typename TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::Scalar,\
- TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::NumDimensions, 0, typename TensorForcedEvalOp<DevExpr>::Index>, 0, MakeGlobalPointer> Type;\
+ TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::NumDimensions, Eigen::internal::traits<TensorForcedEvalOp<DevExpr, MakeGlobalPointer>>::Layout, typename TensorForcedEvalOp<DevExpr>::Index>, Eigen::internal::traits<TensorForcedEvalOp<DevExpr, MakeGlobalPointer>>::Layout, MakeGlobalPointer> Type;\
Type expr;\
template <typename FuncDetector>\
ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
- : expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}\
+ : expr(Type(ConvertToActualTypeSycl(typename Type::Scalar, utility::tuple::get<N>(t)), fd.dimensions())) {}\
};
+//: expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}
FORCEDEVAL(const)
FORCEDEVAL()
@@ -214,12 +218,13 @@ struct ExprConstructor<CVQual TensorReductionOp<OP, Dim, OrigExpr, MakeGlobalPoi
CVQual PlaceHolder<CVQual TensorReductionOp<OP, Dim, DevExpr>, N>, Params...> {\
static const size_t NumIndices= ValueCondition< TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions==0, 1, TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions >::Res;\
typedef CVQual TensorMap<Tensor<typename TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::Scalar,\
- NumIndices, 0, typename TensorReductionOp<OP, Dim, DevExpr>::Index>, 0, MakeGlobalPointer> Type;\
+ NumIndices, Eigen::internal::traits<TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>>::Layout, typename TensorReductionOp<OP, Dim, DevExpr>::Index>, Eigen::internal::traits<TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>>::Layout, MakeGlobalPointer> Type;\
Type expr;\
template <typename FuncDetector>\
ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
- : expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}\
+ :expr(Type(ConvertToActualTypeSycl(typename Type::Scalar, utility::tuple::get<N>(t)), fd.dimensions())) {}\
};
+//: expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}
SYCLREDUCTIONEXPR(const)
SYCLREDUCTIONEXPR()
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
index 461aef128..94a1452ec 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
@@ -57,9 +57,8 @@ struct AccessorConstructor{
return utility::tuple::append(ExtractAccessor<Arg1>::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor<Arg2>::getTuple(cgh, eval2), ExtractAccessor<Arg3>::getTuple(cgh, eval3)));
}
template< cl::sycl::access::mode AcM, typename Arg> static inline auto getAccessor(cl::sycl::handler& cgh, Arg eval)
- -> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor<AcM,
- typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()))){
- return utility::tuple::make_tuple(eval.device().template get_sycl_accessor<AcM, typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()));
+ -> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor<AcM>(eval.dimensions().TotalSize(), cgh,eval.data()))){
+ return utility::tuple::make_tuple(eval.device().template get_sycl_accessor<AcM>(eval.dimensions().TotalSize(), cgh,eval.data()));
}
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
index ef56391ff..382f0cb50 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
@@ -148,7 +148,7 @@ template<typename InDim>
template<typename Dim> struct DimConstr<Dim, 0> {
template<typename InDim>
- static inline Dim getDim(InDim dims ) {return Dim(dims.TotalSize());}
+ static inline Dim getDim(InDim dims ) {return Dim(static_cast<Dim>(dims.TotalSize()));}
};
template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
index 724eebd83..5742592de 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
@@ -37,11 +37,11 @@ void run(Expr &expr, Dev &dev) {
typedef typename internal::createPlaceHolderExpression<Expr>::Type PlaceHolderExpr;
auto functors = internal::extractFunctors(evaluator);
- dev.m_queue.submit([&](cl::sycl::handler &cgh) {
+ dev.sycl_queue().submit([&](cl::sycl::handler &cgh) {
// create a tuple of accessors from Evaluator
auto tuple_of_accessors = internal::createTupleOfAccessors<decltype(evaluator)>(cgh, evaluator);
size_t range, GRange, tileSize;
- dev.parallel_for_setup(utility::tuple::get<0>(tuple_of_accessors).get_range()[0], tileSize, range, GRange);
+ dev.parallel_for_setup(utility::tuple::get<0>(tuple_of_accessors).get_range()[0]/sizeof(typename Expr::Scalar), tileSize, range, GRange);
// run the kernel
cgh.parallel_for<PlaceHolderExpr>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
@@ -49,11 +49,11 @@ void run(Expr &expr, Dev &dev) {
auto device_expr =internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
auto device_evaluator = Eigen::TensorEvaluator<decltype(device_expr.expr), Eigen::DefaultDevice>(device_expr.expr, Eigen::DefaultDevice());
if (itemID.get_global_linear_id() < range) {
- device_evaluator.evalScalar(static_cast<int>(itemID.get_global_linear_id()));
+ device_evaluator.evalScalar(static_cast<typename DevExpr::Index>(itemID.get_global_linear_id()));
}
});
});
- dev.m_queue.throw_asynchronous();
+ dev.sycl_queue().throw_asynchronous();
}
evaluator.cleanup();
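
Because device buffers are now byte-typed, get_range()[0] on the leaf accessor reports a size in bytes, so it is divided by sizeof(Scalar) before being handed to parallel_for_setup. For reference, a sketch of the launch geometry that parallel_for_setup computes, reconstructed from the hunk shown in TensorDeviceSycl.h above (the branch structure outside the visible hunk is an assumption):

#include <cstddef>

// tileSize: work-group size (half the device maximum); rng: number of work
// items actually needed; GRange: rng rounded up to a multiple of tileSize so
// the nd_range launch is valid and the kernel bounds-checks against rng.
void parallel_for_setup_sketch(size_t n, size_t max_work_group_size,
                               size_t &tileSize, size_t &rng, size_t &GRange) {
  tileSize = max_work_group_size / 2;
  rng = n;
  if (rng == 0) rng = 1;
  GRange = rng;
  if (tileSize > GRange) {
    tileSize = GRange;
  } else if (GRange > tileSize) {
    size_t xMode = GRange % tileSize;
    if (xMode != 0) GRange += (tileSize - xMode);
  }
}
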
diff --git a/unsupported/test/cxx11_tensor_broadcast_sycl.cpp b/unsupported/test/cxx11_tensor_broadcast_sycl.cpp
index 02aa4c636..c4798d42c 100644
--- a/unsupported/test/cxx11_tensor_broadcast_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_broadcast_sycl.cpp
@@ -25,38 +25,47 @@ using Eigen::SyclDevice;
using Eigen::Tensor;
using Eigen::TensorMap;
+template <typename DataType, int DataLayout>
static void test_broadcast_sycl_fixed(const Eigen::SyclDevice &sycl_device){
// BROADCAST test:
- array<int, 4> in_range = {{2, 3, 5, 7}};
- array<int, 4> broadcasts = {{2, 3, 1, 4}};
+ int inDim1=2;
+ int inDim2=3;
+ int inDim3=5;
+ int inDim4=7;
+ int bDim1=2;
+ int bDim2=3;
+ int bDim3=1;
+ int bDim4=4;
+ array<int, 4> in_range = {{inDim1, inDim2, inDim3, inDim4}};
+ array<int, 4> broadcasts = {{bDim1, bDim2, bDim3, bDim4}};
array<int, 4> out_range; // = in_range * broadcasts
for (size_t i = 0; i < out_range.size(); ++i)
out_range[i] = in_range[i] * broadcasts[i];
- Tensor<float, 4> input(in_range);
- Tensor<float, 4> out(out_range);
+ Tensor<DataType, 4, DataLayout> input(in_range);
+ Tensor<DataType, 4, DataLayout> out(out_range);
for (size_t i = 0; i < in_range.size(); ++i)
VERIFY_IS_EQUAL(out.dimension(i), out_range[i]);
for (int i = 0; i < input.size(); ++i)
- input(i) = static_cast<float>(i);
+ input(i) = static_cast<DataType>(i);
- float * gpu_in_data = static_cast<float*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(float)));
- float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
+ DataType * gpu_in_data = static_cast<DataType*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(DataType)));
+ DataType * gpu_out_data = static_cast<DataType*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType)));
- TensorMap<TensorFixedSize<float, Sizes<2, 3, 5, 7>>> gpu_in(gpu_in_data, in_range);
- TensorMap<Tensor<float, 4>> gpu_out(gpu_out_data, out_range);
- sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(float));
+ TensorMap<TensorFixedSize<DataType, Sizes<2, 3, 5, 7>, DataLayout>> gpu_in(gpu_in_data, in_range);
+ TensorMap<Tensor<DataType, 4, DataLayout>> gpu_out(gpu_out_data, out_range);
+ sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(DataType));
gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts);
- sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType));
- for (int i = 0; i < 4; ++i) {
- for (int j = 0; j < 9; ++j) {
- for (int k = 0; k < 5; ++k) {
- for (int l = 0; l < 28; ++l) {
+ for (int i = 0; i < inDim1*bDim1; ++i) {
+ for (int j = 0; j < inDim2*bDim2; ++j) {
+ for (int k = 0; k < inDim3*bDim3; ++k) {
+ for (int l = 0; l < inDim4*bDim4; ++l) {
VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), out(i,j,k,l));
}
}
@@ -67,40 +76,48 @@ static void test_broadcast_sycl_fixed(const Eigen::SyclDevice &sycl_device){
sycl_device.deallocate(gpu_out_data);
}
-
+template <typename DataType, int DataLayout>
static void test_broadcast_sycl(const Eigen::SyclDevice &sycl_device){
// BROADCAST test:
- array<int, 4> in_range = {{2, 3, 5, 7}};
- array<int, 4> broadcasts = {{2, 3, 1, 4}};
+ int inDim1=2;
+ int inDim2=3;
+ int inDim3=5;
+ int inDim4=7;
+ int bDim1=2;
+ int bDim2=3;
+ int bDim3=1;
+ int bDim4=4;
+ array<int, 4> in_range = {{inDim1, inDim2, inDim3, inDim4}};
+ array<int, 4> broadcasts = {{bDim1, bDim2, bDim3, bDim4}};
array<int, 4> out_range; // = in_range * broadcasts
for (size_t i = 0; i < out_range.size(); ++i)
out_range[i] = in_range[i] * broadcasts[i];
- Tensor<float, 4> input(in_range);
- Tensor<float, 4> out(out_range);
+ Tensor<DataType, 4, DataLayout> input(in_range);
+ Tensor<DataType, 4, DataLayout> out(out_range);
for (size_t i = 0; i < in_range.size(); ++i)
VERIFY_IS_EQUAL(out.dimension(i), out_range[i]);
for (int i = 0; i < input.size(); ++i)
- input(i) = static_cast<float>(i);
+ input(i) = static_cast<DataType>(i);
- float * gpu_in_data = static_cast<float*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(float)));
- float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
+ DataType * gpu_in_data = static_cast<DataType*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(DataType)));
+ DataType * gpu_out_data = static_cast<DataType*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType)));
- TensorMap<Tensor<float, 4>> gpu_in(gpu_in_data, in_range);
- TensorMap<Tensor<float, 4>> gpu_out(gpu_out_data, out_range);
- sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(float));
+ TensorMap<Tensor<DataType, 4, DataLayout>> gpu_in(gpu_in_data, in_range);
+ TensorMap<Tensor<DataType, 4, DataLayout>> gpu_out(gpu_out_data, out_range);
+ sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(DataType));
gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts);
- sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType));
- for (int i = 0; i < 4; ++i) {
- for (int j = 0; j < 9; ++j) {
- for (int k = 0; k < 5; ++k) {
- for (int l = 0; l < 28; ++l) {
- VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), out(i,j,k,l));
+ for (int i = 0; i < inDim1*bDim1; ++i) {
+ for (int j = 0; j < inDim2*bDim2; ++j) {
+ for (int k = 0; k < inDim3*bDim3; ++k) {
+ for (int l = 0; l < inDim4*bDim4; ++l) {
+ VERIFY_IS_APPROX(input(i%inDim1,j%inDim2,k%inDim3,l%inDim4), out(i,j,k,l));
}
}
}
@@ -110,10 +127,21 @@ static void test_broadcast_sycl(const Eigen::SyclDevice &sycl_device){
sycl_device.deallocate(gpu_out_data);
}
+template<typename DataType, typename dev_Selector> void sycl_broadcast_test_per_device(dev_Selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_broadcast_sycl_fixed<DataType, RowMajor>(sycl_device);
+ test_broadcast_sycl<DataType, RowMajor>(sycl_device);
+ test_broadcast_sycl_fixed<DataType, ColMajor>(sycl_device);
+ test_broadcast_sycl<DataType, ColMajor>(sycl_device);
+}
void test_cxx11_tensor_broadcast_sycl() {
- cl::sycl::gpu_selector s;
- Eigen::SyclDevice sycl_device(s);
- CALL_SUBTEST(test_broadcast_sycl_fixed(sycl_device));
- CALL_SUBTEST(test_broadcast_sycl(sycl_device));
+ printf("Test on GPU: OpenCL\n");
+ CALL_SUBTEST(sycl_broadcast_test_per_device<float>((cl::sycl::gpu_selector())));
+ printf("repeating the test on CPU: OpenCL\n");
+ CALL_SUBTEST(sycl_broadcast_test_per_device<float>((cl::sycl::cpu_selector())));
+ printf("repeating the test on CPU: HOST\n");
+ CALL_SUBTEST(sycl_broadcast_test_per_device<float>((cl::sycl::host_selector())));
+ printf("Test Passed******************\n" );
}
diff --git a/unsupported/test/cxx11_tensor_builtins_sycl.cpp b/unsupported/test/cxx11_tensor_builtins_sycl.cpp
index d57d502ca..26cea18a6 100644
--- a/unsupported/test/cxx11_tensor_builtins_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_builtins_sycl.cpp
@@ -100,7 +100,7 @@ template <typename T> T inverse(T x) { return 1 / x; }
#define TEST_IS_THAT_RETURNS_BOOL(SCALAR, FUNC) \
{ \
- /* out OPERATOR in.FUNC() */ \
+ /* out = in.FUNC() */ \
Tensor<SCALAR, 3> in(tensorRange); \
Tensor<bool, 3> out(tensorRange); \
in = in.random() + static_cast<SCALAR>(0.01); \
@@ -136,11 +136,13 @@ static void test_builtin_unary_sycl(const Eigen::SyclDevice &sycl_device) {
array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
TEST_UNARY_BUILTINS(float)
+ /// your GPU must support double. Otherwise, disable the double test.
TEST_UNARY_BUILTINS(double)
}
void test_cxx11_tensor_builtins_sycl() {
cl::sycl::gpu_selector s;
- Eigen::SyclDevice sycl_device(s);
+ QueueInterface queueInterface(s);
+ Eigen::SyclDevice sycl_device(&queueInterface);
CALL_SUBTEST(test_builtin_unary_sycl(sycl_device));
}
diff --git a/unsupported/test/cxx11_tensor_device_sycl.cpp b/unsupported/test/cxx11_tensor_device_sycl.cpp
index 8289959eb..a51062d23 100644
--- a/unsupported/test/cxx11_tensor_device_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_device_sycl.cpp
@@ -21,42 +21,59 @@
#include <unsupported/Eigen/CXX11/Tensor>
#include<stdint.h>
-void test_device_memory(const Eigen::SyclDevice &sycl_device) {
- std::cout << "Running on: "
- << sycl_device.m_queue.get_device(). template get_info<cl::sycl::info::device::name>()
- << std::endl;
+template <typename DataType, int DataLayout>
+void test_device_sycl(const Eigen::SyclDevice &sycl_device) {
+ std::cout <<"Hello from ComputeCpp: the requested device exists and the device name is : "
+ << sycl_device.sycl_queue().get_device(). template get_info<cl::sycl::info::device::name>() <<std::endl;
int sizeDim1 = 100;
-
array<int, 1> tensorRange = {{sizeDim1}};
- Tensor<int, 1> in(tensorRange);
- Tensor<int, 1> in1(tensorRange);
- memset(in1.data(), 1,in1.size()*sizeof(int));
- int* gpu_in_data = static_cast<int*>(sycl_device.allocate(in.size()*sizeof(int)));
- sycl_device.memset(gpu_in_data, 1, in.size()*sizeof(int) );
- sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.size()*sizeof(int) );
+ Tensor<DataType, 1, DataLayout> in(tensorRange);
+ Tensor<DataType, 1, DataLayout> in1(tensorRange);
+ memset(in1.data(), 1,in1.size()*sizeof(DataType));
+ DataType * gpu_in_data = static_cast<DataType*>(sycl_device.allocate(in.size()*sizeof(DataType)));
+ sycl_device.memset(gpu_in_data, 1,in.size()*sizeof(DataType) );
+ sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.size()*sizeof(DataType) );
for (int i=0; i<in.size(); i++) {
VERIFY_IS_APPROX(in(i), in1(i));
}
sycl_device.deallocate(gpu_in_data);
}
-
+template <typename DataType, int DataLayout>
void test_device_exceptions(const Eigen::SyclDevice &sycl_device) {
- VERIFY(sycl_device.ok());
- array<int, 1> tensorDims = {{100}};
- int* gpu_data = static_cast<int*>(sycl_device.allocate(100*sizeof(int)));
- TensorMap<Tensor<int, 1>> in(gpu_data, tensorDims);
- TensorMap<Tensor<int, 1>> out(gpu_data, tensorDims);
- out.device(sycl_device) = in / in.constant(0);
- VERIFY(!sycl_device.ok());
+ bool threw_exception = false;
+ int sizeDim1 = 100;
+ array<int, 1> tensorDims = {{sizeDim1}};
+ DataType* gpu_data = static_cast<DataType*>(sycl_device.allocate(sizeDim1*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 1,DataLayout>> in(gpu_data, tensorDims);
+ TensorMap<Tensor<DataType, 1,DataLayout>> out(gpu_data, tensorDims);
+ try {
+ out.device(sycl_device) = in / in.constant(0);
+ } catch(...) {
+ threw_exception = true;
+ }
+ VERIFY(threw_exception);
sycl_device.deallocate(gpu_data);
}
+template<typename DataType, typename dev_Selector> void sycl_device_test_per_device(dev_Selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_device_sycl<DataType, RowMajor>(sycl_device);
+ test_device_sycl<DataType, ColMajor>(sycl_device);
+ /// this test throws an exception; enable it if you want to see the exception
+ // test_device_exceptions<DataType, RowMajor>(sycl_device);
+ /// this test throws an exception; enable it if you want to see the exception
+ // test_device_exceptions<DataType, ColMajor>(sycl_device);
+
+}
void test_cxx11_tensor_device_sycl() {
- cl::sycl::gpu_selector s;
- Eigen::SyclDevice sycl_device(s);
- CALL_SUBTEST(test_device_memory(sycl_device));
- // This deadlocks
- //CALL_SUBTEST(test_device_exceptions(sycl_device));
+ printf("Test on GPU: OpenCL\n");
+ CALL_SUBTEST(sycl_device_test_per_device<int>((cl::sycl::gpu_selector())));
+ printf("repeating the test on CPU: OpenCL\n");
+ CALL_SUBTEST(sycl_device_test_per_device<int>((cl::sycl::cpu_selector())));
+ printf("repeating the test on CPU: HOST\n");
+ CALL_SUBTEST(sycl_device_test_per_device<int>((cl::sycl::host_selector())));
+ printf("Test Passed******************\n" );
}
diff --git a/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp b/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp
index 5690da723..70b182558 100644
--- a/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp
@@ -21,33 +21,33 @@
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
-
+template <typename DataType, int DataLayout>
void test_forced_eval_sycl(const Eigen::SyclDevice &sycl_device) {
int sizeDim1 = 100;
- int sizeDim2 = 200;
- int sizeDim3 = 200;
+ int sizeDim2 = 20;
+ int sizeDim3 = 20;
Eigen::array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
- Eigen::Tensor<float, 3> in1(tensorRange);
- Eigen::Tensor<float, 3> in2(tensorRange);
- Eigen::Tensor<float, 3> out(tensorRange);
+ Eigen::Tensor<DataType, 3, DataLayout> in1(tensorRange);
+ Eigen::Tensor<DataType, 3, DataLayout> in2(tensorRange);
+ Eigen::Tensor<DataType, 3, DataLayout> out(tensorRange);
- float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float)));
- float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float)));
- float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
+ DataType * gpu_in1_data = static_cast<DataType*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(DataType)));
+ DataType * gpu_in2_data = static_cast<DataType*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(DataType)));
+ DataType * gpu_out_data = static_cast<DataType*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType)));
in1 = in1.random() + in1.constant(10.0f);
in2 = in2.random() + in2.constant(10.0f);
// creating TensorMap from tensor
- Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange);
- Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange);
- Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_out(gpu_out_data, tensorRange);
- sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(float));
- sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(float));
+ Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout>> gpu_in1(gpu_in1_data, tensorRange);
+ Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout>> gpu_in2(gpu_in2_data, tensorRange);
+ Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout>> gpu_out(gpu_out_data, tensorRange);
+ sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(DataType));
+ sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(DataType));
/// c=(a+b)*b
gpu_out.device(sycl_device) =(gpu_in1 + gpu_in2).eval() * gpu_in2;
- sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -63,8 +63,19 @@ void test_forced_eval_sycl(const Eigen::SyclDevice &sycl_device) {
}
+template <typename DataType, typename Dev_selector> void tensorForced_evalperDevice(Dev_selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_forced_eval_sycl<DataType, RowMajor>(sycl_device);
+ test_forced_eval_sycl<DataType, ColMajor>(sycl_device);
+}
void test_cxx11_tensor_forced_eval_sycl() {
- cl::sycl::gpu_selector s;
- Eigen::SyclDevice sycl_device(s);
- CALL_SUBTEST(test_forced_eval_sycl(sycl_device));
+
+ printf("Test on GPU: OpenCL\n");
+ CALL_SUBTEST(tensorForced_evalperDevice<float>((cl::sycl::gpu_selector())));
+ printf("repeating the test on CPU: OpenCL\n");
+ CALL_SUBTEST(tensorForced_evalperDevice<float>((cl::sycl::cpu_selector())));
+ printf("repeating the test on CPU: HOST\n");
+ CALL_SUBTEST(tensorForced_evalperDevice<float>((cl::sycl::host_selector())));
+ printf("Test Passed******************\n" );
}
diff --git a/unsupported/test/cxx11_tensor_morphing_sycl.cpp b/unsupported/test/cxx11_tensor_morphing_sycl.cpp
index 8a03b826e..a16e1caf5 100644
--- a/unsupported/test/cxx11_tensor_morphing_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_morphing_sycl.cpp
@@ -28,7 +28,7 @@ using Eigen::SyclDevice;
using Eigen::Tensor;
using Eigen::TensorMap;
-
+template <typename DataType, int DataLayout>
static void test_simple_slice(const Eigen::SyclDevice &sycl_device)
{
int sizeDim1 = 2;
@@ -37,31 +37,31 @@ static void test_simple_slice(const Eigen::SyclDevice &sycl_device)
int sizeDim4 = 7;
int sizeDim5 = 11;
array<int, 5> tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4, sizeDim5}};
- Tensor<float, 5> tensor(tensorRange);
+ Tensor<DataType, 5,DataLayout> tensor(tensorRange);
tensor.setRandom();
array<int, 5> slice1_range ={{1, 1, 1, 1, 1}};
- Tensor<float, 5> slice1(slice1_range);
+ Tensor<DataType, 5,DataLayout> slice1(slice1_range);
- float* gpu_data1 = static_cast<float*>(sycl_device.allocate(tensor.size()*sizeof(float)));
- float* gpu_data2 = static_cast<float*>(sycl_device.allocate(slice1.size()*sizeof(float)));
- TensorMap<Tensor<float, 5>> gpu1(gpu_data1, tensorRange);
- TensorMap<Tensor<float, 5>> gpu2(gpu_data2, slice1_range);
+ DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(tensor.size()*sizeof(DataType)));
+ DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(slice1.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 5,DataLayout>> gpu1(gpu_data1, tensorRange);
+ TensorMap<Tensor<DataType, 5,DataLayout>> gpu2(gpu_data2, slice1_range);
Eigen::DSizes<ptrdiff_t, 5> indices(1,2,3,4,5);
Eigen::DSizes<ptrdiff_t, 5> sizes(1,1,1,1,1);
- sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(float));
+ sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(DataType));
gpu2.device(sycl_device)=gpu1.slice(indices, sizes);
- sycl_device.memcpyDeviceToHost(slice1.data(), gpu_data2,(slice1.size())*sizeof(float));
+ sycl_device.memcpyDeviceToHost(slice1.data(), gpu_data2,(slice1.size())*sizeof(DataType));
VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5));
array<int, 5> slice2_range ={{1,1,2,2,3}};
- Tensor<float, 5> slice2(slice2_range);
- float* gpu_data3 = static_cast<float*>(sycl_device.allocate(slice2.size()*sizeof(float)));
- TensorMap<Tensor<float, 5>> gpu3(gpu_data3, slice2_range);
+ Tensor<DataType, 5,DataLayout> slice2(slice2_range);
+ DataType* gpu_data3 = static_cast<DataType*>(sycl_device.allocate(slice2.size()*sizeof(DataType)));
+ TensorMap<Tensor<DataType, 5,DataLayout>> gpu3(gpu_data3, slice2_range);
Eigen::DSizes<ptrdiff_t, 5> indices2(1,1,3,4,5);
Eigen::DSizes<ptrdiff_t, 5> sizes2(1,1,2,2,3);
gpu3.device(sycl_device)=gpu1.slice(indices2, sizes2);
- sycl_device.memcpyDeviceToHost(slice2.data(), gpu_data3,(slice2.size())*sizeof(float));
+ sycl_device.memcpyDeviceToHost(slice2.data(), gpu_data3,(slice2.size())*sizeof(DataType));
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 2; ++j) {
for (int k = 0; k < 3; ++k) {
@@ -74,11 +74,22 @@ static void test_simple_slice(const Eigen::SyclDevice &sycl_device)
sycl_device.deallocate(gpu_data3);
}
+template<typename DataType, typename dev_Selector> void sycl_slicing_test_per_device(dev_Selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_simple_slice<DataType, RowMajor>(sycl_device);
+ test_simple_slice<DataType, ColMajor>(sycl_device);
+}
void test_cxx11_tensor_morphing_sycl()
{
 /// Currently this only works on the CPU. Adding the GPU causes an LLVM ERROR when constructing the OpenCL kernel at runtime.
- cl::sycl::cpu_selector s;
- Eigen::SyclDevice sycl_device(s);
- CALL_SUBTEST(test_simple_slice(sycl_device));
+// printf("Test on GPU: OpenCL\n");
+// CALL_SUBTEST(sycl_device_test_per_device((cl::sycl::gpu_selector())));
+ printf("repeating the test on CPU: OpenCL\n");
+ CALL_SUBTEST(sycl_slicing_test_per_device<float>((cl::sycl::cpu_selector())));
+ printf("repeating the test on CPU: HOST\n");
+ CALL_SUBTEST(sycl_slicing_test_per_device<float>((cl::sycl::host_selector())));
+ printf("Test Passed******************\n" );
+
}
diff --git a/unsupported/test/cxx11_tensor_reduction_sycl.cpp b/unsupported/test/cxx11_tensor_reduction_sycl.cpp
index a9ef82907..6b62737b8 100644
--- a/unsupported/test/cxx11_tensor_reduction_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_reduction_sycl.cpp
@@ -21,37 +21,37 @@
#include <unsupported/Eigen/CXX11/Tensor>
-
+template <typename DataType, int DataLayout>
static void test_full_reductions_sycl(const Eigen::SyclDevice& sycl_device) {
const int num_rows = 452;
const int num_cols = 765;
array<int, 2> tensorRange = {{num_rows, num_cols}};
- Tensor<float, 2> in(tensorRange);
- Tensor<float, 0> full_redux;
- Tensor<float, 0> full_redux_gpu;
+ Tensor<DataType, 2, DataLayout> in(tensorRange);
+ Tensor<DataType, 0, DataLayout> full_redux;
+ Tensor<DataType, 0, DataLayout> full_redux_gpu;
in.setRandom();
full_redux = in.sum();
- float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
- float* gpu_out_data =(float*)sycl_device.allocate(sizeof(float));
+ DataType* gpu_in_data = static_cast<DataType*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(DataType)));
+ DataType* gpu_out_data = static_cast<DataType*>(sycl_device.allocate(sizeof(DataType)));
- TensorMap<Tensor<float, 2> > in_gpu(gpu_in_data, tensorRange);
- TensorMap<Tensor<float, 0> > out_gpu(gpu_out_data);
+ TensorMap<Tensor<DataType, 2, DataLayout> > in_gpu(gpu_in_data, tensorRange);
+ TensorMap<Tensor<DataType, 0, DataLayout> > out_gpu(gpu_out_data);
- sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
+ sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(DataType));
out_gpu.device(sycl_device) = in_gpu.sum();
- sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, sizeof(float));
+ sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, sizeof(DataType));
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux_gpu(), full_redux());
sycl_device.deallocate(gpu_in_data);
sycl_device.deallocate(gpu_out_data);
}
-
+template <typename DataType, int DataLayout>
static void test_first_dim_reductions_sycl(const Eigen::SyclDevice& sycl_device) {
int dim_x = 145;
@@ -63,23 +63,23 @@ static void test_first_dim_reductions_sycl(const Eigen::SyclDevice& sycl_device)
red_axis[0] = 0;
array<int, 2> reduced_tensorRange = {{dim_y, dim_z}};
- Tensor<float, 3> in(tensorRange);
- Tensor<float, 2> redux(reduced_tensorRange);
- Tensor<float, 2> redux_gpu(reduced_tensorRange);
+ Tensor<DataType, 3, DataLayout> in(tensorRange);
+ Tensor<DataType, 2, DataLayout> redux(reduced_tensorRange);
+ Tensor<DataType, 2, DataLayout> redux_gpu(reduced_tensorRange);
in.setRandom();
redux= in.sum(red_axis);
- float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
- float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float)));
+ DataType* gpu_in_data = static_cast<DataType*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(DataType)));
+ DataType* gpu_out_data = static_cast<DataType*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(DataType)));
- TensorMap<Tensor<float, 3> > in_gpu(gpu_in_data, tensorRange);
- TensorMap<Tensor<float, 2> > out_gpu(gpu_out_data, reduced_tensorRange);
+ TensorMap<Tensor<DataType, 3, DataLayout> > in_gpu(gpu_in_data, tensorRange);
+ TensorMap<Tensor<DataType, 2, DataLayout> > out_gpu(gpu_out_data, reduced_tensorRange);
- sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
+ sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(DataType));
out_gpu.device(sycl_device) = in_gpu.sum(red_axis);
- sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float));
+ sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(DataType));
// Check that the CPU and GPU reductions return the same result.
for(int j=0; j<reduced_tensorRange[0]; j++ )
@@ -90,6 +90,7 @@ static void test_first_dim_reductions_sycl(const Eigen::SyclDevice& sycl_device)
sycl_device.deallocate(gpu_out_data);
}
+template <typename DataType, int DataLayout>
static void test_last_dim_reductions_sycl(const Eigen::SyclDevice &sycl_device) {
int dim_x = 567;
@@ -101,23 +102,23 @@ static void test_last_dim_reductions_sycl(const Eigen::SyclDevice &sycl_device)
red_axis[0] = 2;
array<int, 2> reduced_tensorRange = {{dim_x, dim_y}};
- Tensor<float, 3> in(tensorRange);
- Tensor<float, 2> redux(reduced_tensorRange);
- Tensor<float, 2> redux_gpu(reduced_tensorRange);
+ Tensor<DataType, 3, DataLayout> in(tensorRange);
+ Tensor<DataType, 2, DataLayout> redux(reduced_tensorRange);
+ Tensor<DataType, 2, DataLayout> redux_gpu(reduced_tensorRange);
in.setRandom();
redux= in.sum(red_axis);
- float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
- float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float)));
+ DataType* gpu_in_data = static_cast<DataType*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(DataType)));
+ DataType* gpu_out_data = static_cast<DataType*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(DataType)));
- TensorMap<Tensor<float, 3> > in_gpu(gpu_in_data, tensorRange);
- TensorMap<Tensor<float, 2> > out_gpu(gpu_out_data, reduced_tensorRange);
+ TensorMap<Tensor<DataType, 3, DataLayout> > in_gpu(gpu_in_data, tensorRange);
+ TensorMap<Tensor<DataType, 2, DataLayout> > out_gpu(gpu_out_data, reduced_tensorRange);
- sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
+ sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(DataType));
out_gpu.device(sycl_device) = in_gpu.sum(red_axis);
- sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float));
+ sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(DataType));
// Check that the CPU and GPU reductions return the same result.
for(int j=0; j<reduced_tensorRange[0]; j++ )
for(int k=0; k<reduced_tensorRange[1]; k++ )
@@ -127,12 +128,22 @@ static void test_last_dim_reductions_sycl(const Eigen::SyclDevice &sycl_device)
sycl_device.deallocate(gpu_out_data);
}
-
+template<typename DataType, typename dev_Selector> void sycl_reduction_test_per_device(dev_Selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_full_reductions_sycl<DataType, RowMajor>(sycl_device);
+ test_first_dim_reductions_sycl<DataType, RowMajor>(sycl_device);
+ test_last_dim_reductions_sycl<DataType, RowMajor>(sycl_device);
+ test_full_reductions_sycl<DataType, ColMajor>(sycl_device);
+ test_first_dim_reductions_sycl<DataType, ColMajor>(sycl_device);
+ test_last_dim_reductions_sycl<DataType, ColMajor>(sycl_device);
+}
void test_cxx11_tensor_reduction_sycl() {
- cl::sycl::gpu_selector s;
- Eigen::SyclDevice sycl_device(s);
- CALL_SUBTEST((test_full_reductions_sycl(sycl_device)));
- CALL_SUBTEST((test_first_dim_reductions_sycl(sycl_device)));
- CALL_SUBTEST((test_last_dim_reductions_sycl(sycl_device)));
-
+ printf("Test on GPU: OpenCL\n");
+ CALL_SUBTEST(sycl_reduction_test_per_device<float>((cl::sycl::gpu_selector())));
+ printf("repeating the test on CPU: OpenCL\n");
+ CALL_SUBTEST(sycl_reduction_test_per_device<float>((cl::sycl::cpu_selector())));
+ printf("repeating the test on CPU: HOST\n");
+ CALL_SUBTEST(sycl_reduction_test_per_device<float>((cl::sycl::host_selector())));
+ printf("Test Passed******************\n" );
}
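The reduction path can be sketched the same way. Again this is a hedged illustration built only from calls visible in this diff (allocate, memcpyHostToDevice, sum over an axis, memcpyDeviceToHost); the helper name reduce_first_dim_on, the dimensions, and the tolerance are invented for the sketch.

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>
#include <cmath>

// Hypothetical helper (illustration only): reduce the first dimension on the device
// and check it against the host-side result, as test_first_dim_reductions_sycl does.
template <typename DataType, int DataLayout, typename Selector>
void reduce_first_dim_on(Selector s) {
  Eigen::QueueInterface queueInterface(s);
  Eigen::SyclDevice sycl_device(&queueInterface);

  Eigen::array<int, 3> tensorRange = {{8, 16, 32}};
  Eigen::array<int, 2> reducedRange = {{16, 32}};
  Eigen::array<int, 1> red_axis = {{0}};

  Eigen::Tensor<DataType, 3, DataLayout> in(tensorRange);
  Eigen::Tensor<DataType, 2, DataLayout> redux_host(reducedRange);
  Eigen::Tensor<DataType, 2, DataLayout> redux_gpu(reducedRange);
  in.setRandom();
  redux_host = in.sum(red_axis);                  // reference result computed on the host

  DataType* gpu_in  = static_cast<DataType*>(sycl_device.allocate(in.size() * sizeof(DataType)));
  DataType* gpu_out = static_cast<DataType*>(sycl_device.allocate(redux_gpu.size() * sizeof(DataType)));
  Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout>> in_gpu(gpu_in, tensorRange);
  Eigen::TensorMap<Eigen::Tensor<DataType, 2, DataLayout>> out_gpu(gpu_out, reducedRange);

  sycl_device.memcpyHostToDevice(gpu_in, in.data(), in.size() * sizeof(DataType));
  out_gpu.device(sycl_device) = in_gpu.sum(red_axis);   // reduction kernel on the device
  sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out, redux_gpu.size() * sizeof(DataType));

  for (int j = 0; j < reducedRange[0]; ++j)
    for (int k = 0; k < reducedRange[1]; ++k)
      assert(std::abs(redux_gpu(j, k) - redux_host(j, k)) < DataType(1e-4));

  sycl_device.deallocate(gpu_in);
  sycl_device.deallocate(gpu_out);
}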
diff --git a/unsupported/test/cxx11_tensor_sycl.cpp b/unsupported/test/cxx11_tensor_sycl.cpp
index 05fbf9e62..bf115d652 100644
--- a/unsupported/test/cxx11_tensor_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_sycl.cpp
@@ -26,35 +26,32 @@ using Eigen::array;
using Eigen::SyclDevice;
using Eigen::Tensor;
using Eigen::TensorMap;
-
+template <typename DataType, int DataLayout>
void test_sycl_mem_transfers(const Eigen::SyclDevice &sycl_device) {
int sizeDim1 = 100;
- int sizeDim2 = 100;
- int sizeDim3 = 100;
+ int sizeDim2 = 10;
+ int sizeDim3 = 20;
array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
- Tensor<float, 3> in1(tensorRange);
- Tensor<float, 3> out1(tensorRange);
- Tensor<float, 3> out2(tensorRange);
- Tensor<float, 3> out3(tensorRange);
+ Tensor<DataType, 3, DataLayout> in1(tensorRange);
+ Tensor<DataType, 3, DataLayout> out1(tensorRange);
+ Tensor<DataType, 3, DataLayout> out2(tensorRange);
+ Tensor<DataType, 3, DataLayout> out3(tensorRange);
in1 = in1.random();
- float* gpu_data1 = static_cast<float*>(sycl_device.allocate(in1.size()*sizeof(float)));
- float* gpu_data2 = static_cast<float*>(sycl_device.allocate(out1.size()*sizeof(float)));
- //float* gpu_data = static_cast<float*>(sycl_device.allocate(out2.size()*sizeof(float)));
+ DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(in1.size()*sizeof(DataType)));
+ DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(out1.size()*sizeof(DataType)));
+
+ TensorMap<Tensor<DataType, 3, DataLayout>> gpu1(gpu_data1, tensorRange);
+ TensorMap<Tensor<DataType, 3, DataLayout>> gpu2(gpu_data2, tensorRange);
- TensorMap<Tensor<float, 3>> gpu1(gpu_data1, tensorRange);
- TensorMap<Tensor<float, 3>> gpu2(gpu_data2, tensorRange);
- //TensorMap<Tensor<float, 3>> gpu_out2(gpu_out2_data, tensorRange);
-
- sycl_device.memcpyHostToDevice(gpu_data1, in1.data(),(in1.size())*sizeof(float));
- sycl_device.memcpyHostToDevice(gpu_data2, in1.data(),(in1.size())*sizeof(float));
+ sycl_device.memcpyHostToDevice(gpu_data1, in1.data(),(in1.size())*sizeof(DataType));
+ sycl_device.memcpyHostToDevice(gpu_data2, in1.data(),(in1.size())*sizeof(DataType));
gpu1.device(sycl_device) = gpu1 * 3.14f;
gpu2.device(sycl_device) = gpu2 * 2.7f;
- sycl_device.memcpyDeviceToHost(out1.data(), gpu_data1,(out1.size())*sizeof(float));
- sycl_device.memcpyDeviceToHost(out2.data(), gpu_data1,(out2.size())*sizeof(float));
- sycl_device.memcpyDeviceToHost(out3.data(), gpu_data2,(out3.size())*sizeof(float));
- // sycl_device.Synchronize();
+ sycl_device.memcpyDeviceToHost(out1.data(), gpu_data1,(out1.size())*sizeof(DataType));
+ sycl_device.memcpyDeviceToHost(out2.data(), gpu_data1,(out2.size())*sizeof(DataType));
+ sycl_device.memcpyDeviceToHost(out3.data(), gpu_data2,(out3.size())*sizeof(DataType));
for (int i = 0; i < in1.size(); ++i) {
VERIFY_IS_APPROX(out1(i), in1(i) * 3.14f);
@@ -65,34 +62,34 @@ void test_sycl_mem_transfers(const Eigen::SyclDevice &sycl_device) {
sycl_device.deallocate(gpu_data1);
sycl_device.deallocate(gpu_data2);
}
-
+template <typename DataType, int DataLayout>
void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
int sizeDim1 = 100;
- int sizeDim2 = 100;
- int sizeDim3 = 100;
+ int sizeDim2 = 10;
+ int sizeDim3 = 20;
array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
- Tensor<float, 3> in1(tensorRange);
- Tensor<float, 3> in2(tensorRange);
- Tensor<float, 3> in3(tensorRange);
- Tensor<float, 3> out(tensorRange);
+ Tensor<DataType, 3, DataLayout> in1(tensorRange);
+ Tensor<DataType, 3, DataLayout> in2(tensorRange);
+ Tensor<DataType, 3, DataLayout> in3(tensorRange);
+ Tensor<DataType, 3, DataLayout> out(tensorRange);
in2 = in2.random();
in3 = in3.random();
- float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.size()*sizeof(float)));
- float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.size()*sizeof(float)));
- float * gpu_in3_data = static_cast<float*>(sycl_device.allocate(in3.size()*sizeof(float)));
- float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.size()*sizeof(float)));
+ DataType * gpu_in1_data = static_cast<DataType*>(sycl_device.allocate(in1.size()*sizeof(DataType)));
+ DataType * gpu_in2_data = static_cast<DataType*>(sycl_device.allocate(in2.size()*sizeof(DataType)));
+ DataType * gpu_in3_data = static_cast<DataType*>(sycl_device.allocate(in3.size()*sizeof(DataType)));
+ DataType * gpu_out_data = static_cast<DataType*>(sycl_device.allocate(out.size()*sizeof(DataType)));
- TensorMap<Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange);
- TensorMap<Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange);
- TensorMap<Tensor<float, 3>> gpu_in3(gpu_in3_data, tensorRange);
- TensorMap<Tensor<float, 3>> gpu_out(gpu_out_data, tensorRange);
+ TensorMap<Tensor<DataType, 3, DataLayout>> gpu_in1(gpu_in1_data, tensorRange);
+ TensorMap<Tensor<DataType, 3, DataLayout>> gpu_in2(gpu_in2_data, tensorRange);
+ TensorMap<Tensor<DataType, 3, DataLayout>> gpu_in3(gpu_in3_data, tensorRange);
+ TensorMap<Tensor<DataType, 3, DataLayout>> gpu_out(gpu_out_data, tensorRange);
/// a=1.2f
gpu_in1.device(sycl_device) = gpu_in1.constant(1.2f);
- sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.size())*sizeof(float));
+ sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -104,7 +101,7 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
/// a=b*1.2f
gpu_out.device(sycl_device) = gpu_in1 * 1.2f;
- sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.size())*sizeof(float));
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -116,9 +113,9 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
printf("a=b*1.2f Test Passed\n");
/// c=a*b
- sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.size())*sizeof(float));
+ sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.size())*sizeof(DataType));
gpu_out.device(sycl_device) = gpu_in1 * gpu_in2;
- sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float));
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -132,7 +129,7 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
/// c=a+b
gpu_out.device(sycl_device) = gpu_in1 + gpu_in2;
- sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float));
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -146,7 +143,7 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
/// c=a*a
gpu_out.device(sycl_device) = gpu_in1 * gpu_in1;
- sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float));
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -160,7 +157,7 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
//a*3.14f + b*2.7f
gpu_out.device(sycl_device) = gpu_in1 * gpu_in1.constant(3.14f) + gpu_in2 * gpu_in2.constant(2.7f);
- sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.size())*sizeof(float));
+ sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -173,9 +170,9 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
printf("a*3.14f + b*2.7f Test Passed\n");
///d= (a>0.5? b:c)
- sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.size())*sizeof(float));
+ sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.size())*sizeof(DataType));
gpu_out.device(sycl_device) =(gpu_in1 > gpu_in1.constant(0.5f)).select(gpu_in2, gpu_in3);
- sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float));
+ sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -191,10 +188,20 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
sycl_device.deallocate(gpu_in3_data);
sycl_device.deallocate(gpu_out_data);
}
-
+template<typename DataType, typename dev_Selector> void sycl_computing_test_per_device(dev_Selector s){
+ QueueInterface queueInterface(s);
+ auto sycl_device = Eigen::SyclDevice(&queueInterface);
+ test_sycl_mem_transfers<DataType, RowMajor>(sycl_device);
+ test_sycl_computations<DataType, RowMajor>(sycl_device);
+ test_sycl_mem_transfers<DataType, ColMajor>(sycl_device);
+ test_sycl_computations<DataType, ColMajor>(sycl_device);
+}
void test_cxx11_tensor_sycl() {
- cl::sycl::gpu_selector s;
- Eigen::SyclDevice sycl_device(s);
- CALL_SUBTEST(test_sycl_mem_transfers(sycl_device));
- CALL_SUBTEST(test_sycl_computations(sycl_device));
+ printf("Test on GPU: OpenCL\n");
+ CALL_SUBTEST(sycl_computing_test_per_device<float>((cl::sycl::gpu_selector())));
+ printf("repeating the test on CPU: OpenCL\n");
+ CALL_SUBTEST(sycl_computing_test_per_device<float>((cl::sycl::cpu_selector())));
+ printf("repeating the test on CPU: HOST\n");
+ CALL_SUBTEST(sycl_computing_test_per_device<float>((cl::sycl::host_selector())));
+ printf("Test Passed******************\n" );
}
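Finally, the element-wise path in this last file boils down to the pattern below: copy the inputs in, evaluate coefficient-wise expressions (including select) through the device, and copy the results back. This is a hedged, standalone sketch using only calls shown in this diff; the helper name elementwise_on, the sizes, and the constants are illustrative, and the result checks are omitted for brevity.

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>

// Hypothetical helper (illustration only): coefficient-wise expressions evaluated
// through the SYCL device, mirroring parts of test_sycl_computations above.
template <typename DataType, int DataLayout, typename Selector>
void elementwise_on(Selector s) {
  Eigen::QueueInterface queueInterface(s);
  Eigen::SyclDevice sycl_device(&queueInterface);

  Eigen::array<int, 3> tensorRange = {{16, 8, 4}};
  Eigen::Tensor<DataType, 3, DataLayout> a(tensorRange), b(tensorRange), c(tensorRange);
  Eigen::Tensor<DataType, 3, DataLayout> out1(tensorRange), out2(tensorRange);
  a.setRandom(); b.setRandom(); c.setRandom();

  DataType* gpu_a   = static_cast<DataType*>(sycl_device.allocate(a.size() * sizeof(DataType)));
  DataType* gpu_b   = static_cast<DataType*>(sycl_device.allocate(b.size() * sizeof(DataType)));
  DataType* gpu_c   = static_cast<DataType*>(sycl_device.allocate(c.size() * sizeof(DataType)));
  DataType* gpu_out = static_cast<DataType*>(sycl_device.allocate(out1.size() * sizeof(DataType)));
  Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout>> ga(gpu_a, tensorRange);
  Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout>> gb(gpu_b, tensorRange);
  Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout>> gc(gpu_c, tensorRange);
  Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout>> gout(gpu_out, tensorRange);

  sycl_device.memcpyHostToDevice(gpu_a, a.data(), a.size() * sizeof(DataType));
  sycl_device.memcpyHostToDevice(gpu_b, b.data(), b.size() * sizeof(DataType));
  sycl_device.memcpyHostToDevice(gpu_c, c.data(), c.size() * sizeof(DataType));

  // out1 = a*3.14 + b*2.7, evaluated as one fused kernel on the device.
  gout.device(sycl_device) = ga * ga.constant(DataType(3.14f)) + gb * gb.constant(DataType(2.7f));
  sycl_device.memcpyDeviceToHost(out1.data(), gpu_out, out1.size() * sizeof(DataType));

  // out2 = (a > 0.5 ? b : c), reusing the same device output buffer.
  gout.device(sycl_device) = (ga > ga.constant(DataType(0.5f))).select(gb, gc);
  sycl_device.memcpyDeviceToHost(out2.data(), gpu_out, out2.size() * sizeof(DataType));

  sycl_device.deallocate(gpu_a);
  sycl_device.deallocate(gpu_b);
  sycl_device.deallocate(gpu_c);
  sycl_device.deallocate(gpu_out);
}

A driver in the style of sycl_computing_test_per_device above would then instantiate this helper for float and for each of gpu_selector, cpu_selector, and host_selector.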