author	Benoit Steiner <benoit.steiner.goog@gmail.com>	2017-04-05 14:28:08 +0000
committer	Benoit Steiner <benoit.steiner.goog@gmail.com>	2017-04-05 14:28:08 +0000
commit	0d08165a7f7a95c35dead32ec2d567e9a4b609b0 (patch)
tree	ac7c8a2f553056f3f3da70592fa821cec8aa883d /unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
parent	4910630c968f9803bef5572bf7f17767c2ef09f1 (diff)
parent	068cc0970890b534d65dbc99e6b5795acbaaa801 (diff)
Merged in benoitsteiner/opencl (pull request PR-309)
OpenCL improvements
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h')
-rw-r--r--	unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h	411
1 file changed, 266 insertions, 145 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
index e209799bb..c5142b7c9 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
@@ -15,9 +15,22 @@
#if defined(EIGEN_USE_SYCL) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H)
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
+template <typename Scalar, size_t Align = EIGEN_MAX_ALIGN_BYTES, class Allocator = std::allocator<Scalar>>
+struct SyclAllocator {
+ typedef Scalar value_type;
+ typedef typename std::allocator_traits<Allocator>::pointer pointer;
+ typedef typename std::allocator_traits<Allocator>::size_type size_type;
+
+  SyclAllocator() {}
+ Scalar* allocate(std::size_t elements) { return static_cast<Scalar*>(aligned_alloc(Align, elements)); }
+ void deallocate(Scalar * p, std::size_t size) { EIGEN_UNUSED_VARIABLE(size); free(p); }
+};
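As a usage sketch (the alias name and size below are illustrative, assuming a ComputeCpp-era SYCL 1.2 toolchain), this allocator is what gives the byte buffers used throughout this file their EIGEN_MAX_ALIGN_BYTES alignment:

// Sketch: an aligned byte buffer of the kind used throughout this patch.
typedef cl::sycl::buffer<uint8_t, 1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> > AlignedBytes;
AlignedBytes staging(cl::sycl::range<1>(1024)); // 1024 bytes, allocated via aligned_alloc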
+
namespace Eigen {
#define ConvertToActualTypeSycl(Scalar, buf_acc) reinterpret_cast<typename cl::sycl::global_ptr<Scalar>::pointer_t>((&(*buf_acc.get_pointer())))
+ #define ConvertToActualSyclOffset(Scalar, offset) offset/sizeof(Scalar)
+
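A sketch of how these two macros pair up inside a kernel body (the accessor and offset names are illustrative, not taken from the patch):

// Sketch: view a byte accessor as Scalar* and rescale a byte offset to elements.
// float* typed_ptr   = ConvertToActualTypeSycl(float, byte_accessor);
// size_t elem_offset = ConvertToActualSyclOffset(float, byte_offset); // byte_offset / sizeof(float)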
template <typename Scalar, typename read_accessor, typename write_accessor> class MemCopyFunctor {
public:
@@ -40,27 +53,50 @@ namespace Eigen {
size_t m_offset;
};
+template<typename AccType>
struct memsetkernelFunctor{
- typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer> AccType;
AccType m_acc;
+ const ptrdiff_t buff_offset;
const size_t m_rng, m_c;
- memsetkernelFunctor(AccType acc, const size_t rng, const size_t c):m_acc(acc), m_rng(rng), m_c(c){}
+ memsetkernelFunctor(AccType acc, const ptrdiff_t buff_offset_, const size_t rng, const size_t c):m_acc(acc), buff_offset(buff_offset_), m_rng(rng), m_c(c){}
void operator()(cl::sycl::nd_item<1> itemID) {
auto globalid=itemID.get_global_linear_id();
- if (globalid< m_rng) m_acc[globalid] = m_c;
+ if (globalid< m_rng) m_acc[globalid + buff_offset] = m_c;
}
};
+struct memsetCghFunctor{
+ cl::sycl::buffer<uint8_t, 1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> >& m_buf;
+ const ptrdiff_t& buff_offset;
+ const size_t& rng , GRange, tileSize;
+ const int &c;
+ memsetCghFunctor(cl::sycl::buffer<uint8_t, 1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> >& buff, const ptrdiff_t& buff_offset_, const size_t& rng_, const size_t& GRange_, const size_t& tileSize_, const int& c_)
+ :m_buf(buff), buff_offset(buff_offset_), rng(rng_), GRange(GRange_), tileSize(tileSize_), c(c_){}
+
+ void operator()(cl::sycl::handler &cgh) const {
+ auto buf_acc = m_buf.template get_access<cl::sycl::access::mode::write, cl::sycl::access::target::global_buffer>(cgh);
+ typedef decltype(buf_acc) AccType;
+ cgh.parallel_for(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), memsetkernelFunctor<AccType>(buf_acc, buff_offset, rng, c));
+ }
+};
+
+ // get_devices returns all the available OpenCL devices. Either use a device_selector or exclude the devices that ComputeCpp does not support (AMD OpenCL for CPU and Intel GPU).
EIGEN_STRONG_INLINE auto get_sycl_supported_devices()->decltype(cl::sycl::device::get_devices()){
auto devices = cl::sycl::device::get_devices();
std::vector<cl::sycl::device>::iterator it =devices.begin();
while(it!=devices.end()) {
- /// get_devices returns all the available opencl devices. Either use device_selector or exclude devices that computecpp does not support (AMD OpenCL for CPU )
- auto s= (*it).template get_info<cl::sycl::info::device::vendor>();
- std::transform(s.begin(), s.end(), s.begin(), ::tolower);
- if((*it).is_cpu() && s.find("amd")!=std::string::npos && s.find("apu") == std::string::npos){ // remove amd cpu as it is not supported by computecpp allow APUs
- it=devices.erase(it);
+ /// FIXME: currently there is a bug in the AMD CPU OpenCL driver.
+ auto name = (*it).template get_info<cl::sycl::info::device::name>();
+ std::transform(name.begin(), name.end(), name.begin(), ::tolower);
+ auto vendor = (*it).template get_info<cl::sycl::info::device::vendor>();
+ std::transform(vendor.begin(), vendor.end(), vendor.begin(), ::tolower);
+
+ if((*it).is_cpu() && vendor.find("amd")!=std::string::npos && vendor.find("apu") == std::string::npos){ // remove AMD CPUs, as they are not supported by ComputeCpp; allow APUs
+ it = devices.erase(it);
+ // FIXME: currently there is a bug in the Intel GPU driver regarding memory alignment.
+ }else if((*it).is_gpu() && name.find("intel")!=std::string::npos){
+ it = devices.erase(it);
}
else{
++it;
@@ -69,18 +105,8 @@ EIGEN_STRONG_INLINE auto get_sycl_supported_devices()->decltype(cl::sycl::device
return devices;
}
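A minimal sketch of consuming the filtered list (it assumes at least one supported device is present; error handling is elided):

// Sketch: build a queue on the first device that survived the filtering above.
auto devices = Eigen::get_sycl_supported_devices();
cl::sycl::queue q(devices[0]);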
-struct QueueInterface {
- /// class members:
- bool exception_caught_ = false;
-
- mutable std::mutex mutex_;
-
- /// std::map is the container used to make sure that we create only one buffer
- /// per pointer. The lifespan of the buffer now depends on the lifespan of SyclDevice.
- /// If a non-read-only pointer is needed to be accessed on the host we should manually deallocate it.
- mutable std::map<const uint8_t *, cl::sycl::buffer<uint8_t, 1>> buffer_map;
- /// sycl queue
- mutable cl::sycl::queue m_queue;
+class QueueInterface {
+public:
  /// Creating the device via a cl::sycl::selector or a cl::sycl::device works the same way; both can be captured through the dev_Selector typename.
  /// The SyclStreamDevice is not owned. It is the caller's responsibility to destroy it.
template<typename dev_Selector> explicit QueueInterface(const dev_Selector& s):
@@ -116,11 +142,11 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
  /// We use this pointer as a key in our buffer_map, and we make sure that we dedicate only one buffer to this pointer.
  /// The device pointer is deleted by calling the deallocate function.
EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
- auto buf = cl::sycl::buffer<uint8_t,1>(cl::sycl::range<1>(num_bytes));
+ std::lock_guard<std::mutex> lock(mutex_);
+ auto buf = cl::sycl::buffer<uint8_t,1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> >(cl::sycl::range<1>(num_bytes));
auto ptr =buf.get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>().get_pointer();
buf.set_final_data(nullptr);
- std::lock_guard<std::mutex> lock(mutex_);
- buffer_map.insert(std::pair<const uint8_t *, cl::sycl::buffer<uint8_t, 1>>(static_cast<const uint8_t*>(ptr),buf));
+ buffer_map.insert(std::pair<const uint8_t *, cl::sycl::buffer<uint8_t, 1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> > >(static_cast<const uint8_t*>(ptr),buf));
return static_cast<void*>(ptr);
}
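A hedged sketch of what allocate hands back (the selector choice is an assumption): the returned pointer is a host-accessor address that serves purely as a key into buffer_map, so later pointer arithmetic on it is resolved through find_buffer.

// Sketch: allocate registers exactly one buffer in buffer_map per call.
Eigen::QueueInterface qi(cl::sycl::default_selector());
void* dev_ptr = qi.allocate(16 * sizeof(float)); // a key into buffer_map, not a plain malloc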
@@ -138,62 +164,113 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
std::lock_guard<std::mutex> lock(mutex_);
buffer_map.clear();
}
-
- EIGEN_STRONG_INLINE std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1>>::iterator find_buffer(const void* ptr) const {
- std::lock_guard<std::mutex> lock(mutex_);
- auto it1 = buffer_map.find(static_cast<const uint8_t*>(ptr));
- if (it1 != buffer_map.end()){
- return it1;
- }
- else{
- for(std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1>>::iterator it=buffer_map.begin(); it!=buffer_map.end(); ++it){
- auto size = it->second.get_size();
- if((it->first < (static_cast<const uint8_t*>(ptr))) && ((static_cast<const uint8_t*>(ptr)) < (it->first + size)) ) return it;
- }
- }
- std::cerr << "No sycl buffer found. Make sure that you have allocated memory for your buffer by calling malloc-ed function."<< std::endl;
- abort();
+  /// The memcpyHostToDevice is used to copy data from a host pointer to a device-only pointer. Using the device
+  /// pointer as a key we find the corresponding sycl buffer. The host data is wrapped in a temporary sycl buffer
+  /// built with map_allocator, and a kernel then copies it into the device buffer in parallel. The first time the
+  /// destination buffer is accessed, the data will be available on the device.
+  /// This separates the actual kernel execution from the data transfer, which is required for benchmarking.
+  /// It is also faster, as it uses the map_allocator instead of a plain memcpy.
+ template<typename Index> EIGEN_STRONG_INLINE void memcpyHostToDevice(Index *dst, const Index *src, size_t n) const {
+ auto it =find_buffer(dst);
+ auto offset =static_cast<const uint8_t*>(static_cast<const void*>(dst))- it->first;
+ offset/=sizeof(Index);
+ size_t rng, GRange, tileSize;
+ parallel_for_setup(n/sizeof(Index), tileSize, rng, GRange);
+ auto src_buf = cl::sycl::buffer<uint8_t, 1, cl::sycl::map_allocator<uint8_t> >(static_cast<uint8_t*>(static_cast<void*>(const_cast<Index*>(src))), cl::sycl::range<1>(n));
+ m_queue.submit([&](cl::sycl::handler &cgh) {
+ auto dst_acc= it->second.template get_access<cl::sycl::access::mode::write, cl::sycl::access::target::global_buffer>(cgh);
+ auto src_acc =src_buf.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
+ typedef decltype(src_acc) read_accessor;
+ typedef decltype(dst_acc) write_accessor;
+ cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<Index, read_accessor, write_accessor>(src_acc, dst_acc, rng, offset, 0));
+ });
+ synchronize();
}
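Continuing the sketch above, a host-to-device copy would look like this; note that n is a byte count, which the implementation divides by sizeof(Index):

// Sketch: copy 16 floats from host memory to the device-only pointer.
float host_src[16] = {0};
float* dev_dst = static_cast<float*>(qi.allocate(sizeof(host_src)));
qi.memcpyHostToDevice(dev_dst, host_src, sizeof(host_src)); // n is in bytes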
-
- // This function checks if the runtime recorded an error for the
- // underlying stream device.
- EIGEN_STRONG_INLINE bool ok() const {
- if (!exception_caught_) {
- m_queue.wait_and_throw();
- }
- return !exception_caught_;
+  /// The memcpyDeviceToHost is used to copy data from the device to the host. Here, in order to avoid copying the
+  /// data twice, we create a sycl buffer with map_allocator for the destination pointer, with a discard_write accessor
+  /// on it. The lifespan of the buffer is bound to the lifespan of the memcpyDeviceToHost function. We create a kernel
+  /// to copy the data from the device-only source buffer to the destination buffer with map_allocator on the gpu in
+  /// parallel. At the end of the function call the destination buffer is destroyed and the data becomes available on
+  /// the dst pointer via the fast copy technique (map_allocator). This way the data is copied back to the cpu only
+  /// once per function call.
+ template<typename Index> EIGEN_STRONG_INLINE void memcpyDeviceToHost(void *dst, const Index *src, size_t n) const {
+ auto it =find_buffer(src);
+ auto offset =static_cast<const uint8_t*>(static_cast<const void*>(src))- it->first;
+ offset/=sizeof(Index);
+ size_t rng, GRange, tileSize;
+ parallel_for_setup(n/sizeof(Index), tileSize, rng, GRange);
+ auto dest_buf = cl::sycl::buffer<uint8_t, 1, cl::sycl::map_allocator<uint8_t> >(static_cast<uint8_t*>(dst), cl::sycl::range<1>(n));
+ m_queue.submit([&](cl::sycl::handler &cgh) {
+ auto src_acc= it->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
+ auto dst_acc =dest_buf.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
+ typedef decltype(src_acc) read_accessor;
+ typedef decltype(dst_acc) write_accessor;
+ cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<Index, read_accessor, write_accessor>(src_acc, dst_acc, rng, 0, offset));
+ });
+ synchronize();
}
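And the reverse direction, under the same assumptions; dst is wrapped in a map_allocator buffer internally, so the host sees the data once the temporary buffer is destroyed at the end of the call:

// Sketch: bring the 16 floats back to the host.
float host_dst[16];
qi.memcpyDeviceToHost(host_dst, dev_dst, sizeof(host_dst));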
- // destructor
- ~QueueInterface() { buffer_map.clear(); }
-};
+  /// The memcpy function: a device-to-device copy between two pointers registered in buffer_map.
+ template<typename Index> EIGEN_STRONG_INLINE void memcpy(void *dst, const Index *src, size_t n) const {
+ auto it1 = find_buffer(static_cast<const void*>(src));
+ auto it2 = find_buffer(dst);
+ auto offset= (static_cast<const uint8_t*>(static_cast<const void*>(src))) - it1->first;
+ auto i= (static_cast<const uint8_t*>(dst)) - it2->first;
+ offset/=sizeof(Index);
+ i/=sizeof(Index);
+ size_t rng, GRange, tileSize;
+ parallel_for_setup(n/sizeof(Index), tileSize, rng, GRange);
+ m_queue.submit([&](cl::sycl::handler &cgh) {
+ auto src_acc =it1->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
+ auto dst_acc =it2->second.template get_access<cl::sycl::access::mode::write, cl::sycl::access::target::global_buffer>(cgh);
+ typedef decltype(src_acc) read_accessor;
+ typedef decltype(dst_acc) write_accessor;
+ cgh.parallel_for(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<Index, read_accessor, write_accessor>(src_acc, dst_acc, rng, i, offset));
+ });
+ synchronize();
+ }
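A device-to-device sketch: both pointers must already be registered in buffer_map, since the implementation looks each one up and converts its byte offset to an element offset:

// Sketch: device-to-device copy between two registered pointers.
float* dev_copy = static_cast<float*>(qi.allocate(16 * sizeof(float)));
qi.memcpy(dev_copy, dev_dst, 16 * sizeof(float));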
-struct SyclDevice {
- // class member.
- QueueInterface* m_queue_stream;
- /// QueueInterface is not owned. it is the caller's responsibility to destroy it.
- explicit SyclDevice(QueueInterface* queue_stream) : m_queue_stream(queue_stream){}
+ EIGEN_STRONG_INLINE void memset(void *data, int c, size_t n) const {
+ size_t rng, GRange, tileSize;
+ parallel_for_setup(n, tileSize, rng, GRange);
+ auto it1 = find_buffer(static_cast<const void*>(data));
+ ptrdiff_t buff_offset= (static_cast<const uint8_t*>(data)) - it1->first;
+ m_queue.submit(memsetCghFunctor(it1->second, buff_offset, rng, GRange, tileSize, c ));
+ synchronize();
+ }
  /// Creation of a sycl accessor for a buffer. This function first tries to find
  /// the buffer in the buffer_map. If found, it gets the accessor from it; if not,
  /// the function adds an entry by creating a sycl buffer for that particular pointer.
template <cl::sycl::access::mode AcMd> EIGEN_STRONG_INLINE cl::sycl::accessor<uint8_t, 1, AcMd, cl::sycl::access::target::global_buffer>
get_sycl_accessor(cl::sycl::handler &cgh, const void* ptr) const {
- return (get_sycl_buffer(ptr).template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
+ return (find_buffer(ptr)->second.template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
}
/// Accessing the created sycl device buffer for the device pointer
- EIGEN_STRONG_INLINE cl::sycl::buffer<uint8_t, 1>& get_sycl_buffer(const void * ptr) const {
- return m_queue_stream->find_buffer(ptr)->second;
+ EIGEN_STRONG_INLINE cl::sycl::buffer<uint8_t, 1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> >& get_sycl_buffer(const void * ptr) const {
+ return find_buffer(ptr)->second;
+ }
+
+ EIGEN_STRONG_INLINE ptrdiff_t get_offset(const void *ptr) const {
+ return (static_cast<const uint8_t*>(ptr))-(find_buffer(ptr)->first);
+ }
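A worked example of the offset arithmetic (values assume the allocation sketched earlier):

// Sketch: offsets are byte distances from the base of the owning buffer.
ptrdiff_t off = qi.get_offset(dev_dst + 4); // == 4 * sizeof(float) == 16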
+
+ EIGEN_STRONG_INLINE void synchronize() const {
+ m_queue.wait_and_throw(); //pass
+ }
+
+ EIGEN_STRONG_INLINE void asynchronousExec() const {
+    /// FIXME: currently there is a race condition in the asynchronous scheduler.
+    //sycl_queue().throw_asynchronous(); // FIXME: does not pass; temporarily disabled.
+ m_queue.wait_and_throw(); //pass
}
- /// This is used to prepare the number of threads and also the number of threads per block for sycl kernels
template<typename Index>
EIGEN_STRONG_INLINE void parallel_for_setup(Index n, Index &tileSize, Index &rng, Index &GRange) const {
- tileSize =static_cast<Index>(sycl_queue().get_device(). template get_info<cl::sycl::info::device::max_work_group_size>());
- auto s= sycl_queue().get_device().template get_info<cl::sycl::info::device::vendor>();
+ tileSize =static_cast<Index>(m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>());
+ auto s= m_queue.get_device().template get_info<cl::sycl::info::device::vendor>();
std::transform(s.begin(), s.end(), s.begin(), ::tolower);
- if(sycl_queue().get_device().is_cpu()){ // intel doesnot allow to use max workgroup size
+ if(m_queue.get_device().is_cpu()){ // Intel does not allow using the max workgroup size
tileSize=std::min(static_cast<Index>(256), static_cast<Index>(tileSize));
}
rng = n;
@@ -210,7 +287,7 @@ struct SyclDevice {
template<typename Index>
EIGEN_STRONG_INLINE void parallel_for_setup(Index dim0, Index dim1, Index &tileSize0, Index &tileSize1, Index &rng0, Index &rng1, Index &GRange0, Index &GRange1) const {
Index max_workgroup_Size = static_cast<Index>(maxSyclThreadsPerBlock());
- if(sycl_queue().get_device().is_cpu()){ // intel doesnot allow to use max workgroup size
+ if(m_queue.get_device().is_cpu()){ // Intel does not allow using the max workgroup size
max_workgroup_Size=std::min(static_cast<Index>(256), static_cast<Index>(max_workgroup_Size));
}
Index pow_of_2 = static_cast<Index>(std::log2(max_workgroup_Size));
@@ -234,13 +311,11 @@ struct SyclDevice {
}
}
-
-
/// This is used to prepare the number of threads and also the number of threads per block for sycl kernels
template<typename Index>
EIGEN_STRONG_INLINE void parallel_for_setup(Index dim0, Index dim1,Index dim2, Index &tileSize0, Index &tileSize1, Index &tileSize2, Index &rng0, Index &rng1, Index &rng2, Index &GRange0, Index &GRange1, Index &GRange2) const {
Index max_workgroup_Size = static_cast<Index>(maxSyclThreadsPerBlock());
- if(sycl_queue().get_device().is_cpu()){ // intel doesnot allow to use max workgroup size
+ if(m_queue.get_device().is_cpu()){ // Intel does not allow using the max workgroup size
max_workgroup_Size=std::min(static_cast<Index>(256), static_cast<Index>(max_workgroup_Size));
}
Index pow_of_2 = static_cast<Index>(std::log2(max_workgroup_Size));
@@ -273,6 +348,108 @@ struct SyclDevice {
if (xMode != 0) GRange0 += static_cast<Index>(tileSize0 - xMode);
}
}
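A sketch of the 1-D launch-geometry helper in use; this mirrors the calls made inside the memcpy paths above:

// Sketch: derive tile size, logical range and rounded global range for n items.
size_t rng, GRange, tileSize;
qi.parallel_for_setup(size_t(1000), tileSize, rng, GRange);
// GRange is rng rounded up to a multiple of tileSize; kernels therefore guard
// with "if (globalid < rng)", exactly as memsetkernelFunctor does above.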
+
+ EIGEN_STRONG_INLINE unsigned long getNumSyclMultiProcessors() const {
+ return m_queue.get_device(). template get_info<cl::sycl::info::device::max_compute_units>();
+ }
+
+ EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerBlock() const {
+ return m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>();
+ }
+
+  /// Not needed for sycl; it should act the same as the CPU version.
+ EIGEN_STRONG_INLINE int majorDeviceVersion() const { return 1; }
+
+ EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerMultiProcessor() const {
+    // OpenCL does not have such a concept.
+ return 2;
+ }
+
+ EIGEN_STRONG_INLINE size_t sharedMemPerBlock() const {
+ return m_queue.get_device(). template get_info<cl::sycl::info::device::local_mem_size>();
+ }
+
+ EIGEN_STRONG_INLINE cl::sycl::queue& sycl_queue() const { return m_queue;}
+
+ // This function checks if the runtime recorded an error for the
+ // underlying stream device.
+ EIGEN_STRONG_INLINE bool ok() const {
+ if (!exception_caught_) {
+ m_queue.wait_and_throw();
+ }
+ return !exception_caught_;
+ }
+
+ // destructor
+ ~QueueInterface() { buffer_map.clear(); }
+
+private:
+ /// class members:
+ bool exception_caught_ = false;
+
+ mutable std::mutex mutex_;
+
+  /// std::map is the container used to make sure that we create only one buffer
+  /// per pointer. The lifespan of the buffer now depends on the lifespan of SyclDevice.
+  /// If a non-read-only pointer needs to be accessed on the host, it must be deallocated manually.
+ mutable std::map<const uint8_t *, cl::sycl::buffer<uint8_t, 1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> > > buffer_map;
+ /// sycl queue
+ mutable cl::sycl::queue m_queue;
+
+ EIGEN_STRONG_INLINE std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> > >::iterator find_buffer(const void* ptr) const {
+ std::lock_guard<std::mutex> lock(mutex_);
+ auto it1 = buffer_map.find(static_cast<const uint8_t*>(ptr));
+ if (it1 != buffer_map.end()){
+ return it1;
+ }
+ else{
+ for(std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> > >::iterator it=buffer_map.begin(); it!=buffer_map.end(); ++it){
+ auto size = it->second.get_size();
+ if((it->first < (static_cast<const uint8_t*>(ptr))) && ((static_cast<const uint8_t*>(ptr)) < (it->first + size)) ) return it;
+ }
+ }
+    std::cerr << "No sycl buffer found. Make sure that you have allocated memory for your buffer by calling the allocate function."<< std::endl;
+ abort();
+ }
+};
+
+// Here is a sycl device struct which accepts the sycl queue interface
+// as an input.
+struct SyclDevice {
+ // class member.
+ QueueInterface* m_queue_stream;
+  /// QueueInterface is not owned. It is the caller's responsibility to destroy it.
+ explicit SyclDevice(QueueInterface* queue_stream) : m_queue_stream(queue_stream){}
+
+ // get sycl accessor
+ template <cl::sycl::access::mode AcMd> EIGEN_STRONG_INLINE cl::sycl::accessor<uint8_t, 1, AcMd, cl::sycl::access::target::global_buffer>
+ get_sycl_accessor(cl::sycl::handler &cgh, const void* ptr) const {
+ return m_queue_stream->template get_sycl_accessor<AcMd>(cgh, ptr);
+ }
+
+ /// Accessing the created sycl device buffer for the device pointer
+ EIGEN_STRONG_INLINE cl::sycl::buffer<uint8_t, 1, SyclAllocator<uint8_t, EIGEN_MAX_ALIGN_BYTES> >& get_sycl_buffer(const void * ptr) const {
+ return m_queue_stream->get_sycl_buffer(ptr);
+ }
+
+ /// This is used to prepare the number of threads and also the number of threads per block for sycl kernels
+ template<typename Index>
+ EIGEN_STRONG_INLINE void parallel_for_setup(Index n, Index &tileSize, Index &rng, Index &GRange) const {
+ m_queue_stream->parallel_for_setup(n, tileSize, rng, GRange);
+ }
+
+ /// This is used to prepare the number of threads and also the number of threads per block for sycl kernels
+ template<typename Index>
+ EIGEN_STRONG_INLINE void parallel_for_setup(Index dim0, Index dim1, Index &tileSize0, Index &tileSize1, Index &rng0, Index &rng1, Index &GRange0, Index &GRange1) const {
+ m_queue_stream->parallel_for_setup(dim0, dim1, tileSize0, tileSize1, rng0, rng1, GRange0, GRange1);
+ }
+
+ /// This is used to prepare the number of threads and also the number of threads per block for sycl kernels
+ template<typename Index>
+ EIGEN_STRONG_INLINE void parallel_for_setup(Index dim0, Index dim1,Index dim2, Index &tileSize0, Index &tileSize1, Index &tileSize2, Index &rng0, Index &rng1, Index &rng2, Index &GRange0, Index &GRange1, Index &GRange2) const {
+ m_queue_stream->parallel_for_setup(dim0, dim1, dim2, tileSize0, tileSize1, tileSize2, rng0, rng1, rng2, GRange0, GRange1, GRange2);
+
+ }
/// allocate device memory
EIGEN_STRONG_INLINE void *allocate(size_t num_bytes) const {
return m_queue_stream->allocate(num_bytes);
@@ -287,78 +464,27 @@ struct SyclDevice {
/// the memcpy function
template<typename Index> EIGEN_STRONG_INLINE void memcpy(void *dst, const Index *src, size_t n) const {
- auto it1 = m_queue_stream->find_buffer(static_cast<const void*>(src));
- auto it2 = m_queue_stream->find_buffer(dst);
- auto offset= (static_cast<const uint8_t*>(static_cast<const void*>(src))) - it1->first;
- auto i= (static_cast<const uint8_t*>(dst)) - it2->first;
- offset/=sizeof(Index);
- i/=sizeof(Index);
- size_t rng, GRange, tileSize;
- parallel_for_setup(n/sizeof(Index), tileSize, rng, GRange);
- sycl_queue().submit([&](cl::sycl::handler &cgh) {
- auto src_acc =it1->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
- auto dst_acc =it2->second.template get_access<cl::sycl::access::mode::write, cl::sycl::access::target::global_buffer>(cgh);
- typedef decltype(src_acc) read_accessor;
- typedef decltype(dst_acc) write_accessor;
- cgh.parallel_for(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<Index, read_accessor, write_accessor>(src_acc, dst_acc, rng, i, offset));
- });
- synchronize();
+ m_queue_stream->memcpy(dst,src,n);
}
- /// The memcpyHostToDevice is used to copy the device only pointer to a host pointer. Using the device
- /// pointer created as a key we find the sycl buffer and get the host accessor with discard_write mode
- /// on it. Using a discard_write accessor guarantees that we do not bring back the current value of the
- /// buffer to host. Then we use the memcpy to copy the data to the host accessor. The first time that
- /// this buffer is accessed, the data will be copied to the device.
+  EIGEN_STRONG_INLINE ptrdiff_t get_offset(const void *ptr) const {
+    return m_queue_stream->get_offset(ptr);
+  }
+  /// memcpyHostToDevice: forwards to the queue interface.
template<typename Index> EIGEN_STRONG_INLINE void memcpyHostToDevice(Index *dst, const Index *src, size_t n) const {
- auto host_acc= get_sycl_buffer(dst). template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
- ::memcpy(host_acc.get_pointer(), src, n);
+ m_queue_stream->memcpyHostToDevice(dst,src,n);
}
- /// The memcpyDeviceToHost is used to copy the data from host to device. Here, in order to avoid double copying the data. We create a sycl
- /// buffer with map_allocator for the destination pointer with a discard_write accessor on it. The lifespan of the buffer is bound to the
- /// lifespan of the memcpyDeviceToHost function. We create a kernel to copy the data, from the device- only source buffer to the destination
- /// buffer with map_allocator on the gpu in parallel. At the end of the function call the destination buffer would be destroyed and the data
- /// would be available on the dst pointer using fast copy technique (map_allocator). In this case we can make sure that we copy the data back
- /// to the cpu only once per function call.
+  /// Here is the memcpyDeviceToHost; it forwards to the queue interface.
template<typename Index> EIGEN_STRONG_INLINE void memcpyDeviceToHost(void *dst, const Index *src, size_t n) const {
- auto it = m_queue_stream->find_buffer(src);
- auto offset =static_cast<const uint8_t*>(static_cast<const void*>(src))- it->first;
- offset/=sizeof(Index);
- size_t rng, GRange, tileSize;
- parallel_for_setup(n/sizeof(Index), tileSize, rng, GRange);
- // Assuming that the dst is the start of the destination pointer
- auto dest_buf = cl::sycl::buffer<uint8_t, 1, cl::sycl::map_allocator<uint8_t> >(static_cast<uint8_t*>(dst), cl::sycl::range<1>(n));
- sycl_queue().submit([&](cl::sycl::handler &cgh) {
- auto src_acc= it->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
- auto dst_acc =dest_buf.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
- typedef decltype(src_acc) read_accessor;
- typedef decltype(dst_acc) write_accessor;
- cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<Index, read_accessor, write_accessor>(src_acc, dst_acc, rng, 0, offset));
- });
- synchronize();
+ m_queue_stream->memcpyDeviceToHost(dst,src,n);
}
- /// returning the sycl queue
- EIGEN_STRONG_INLINE cl::sycl::queue& sycl_queue() const { return m_queue_stream->m_queue;}
  /// Here is the implementation of the memset function on sycl.
EIGEN_STRONG_INLINE void memset(void *data, int c, size_t n) const {
- size_t rng, GRange, tileSize;
- parallel_for_setup(n, tileSize, rng, GRange);
- sycl_queue().submit(memsetCghFunctor(get_sycl_buffer(static_cast<uint8_t*>(static_cast<void*>(data))),rng, GRange, tileSize, c ));
- synchronize();
+ m_queue_stream->memset(data,c,n);
}
-
- struct memsetCghFunctor{
- cl::sycl::buffer<uint8_t, 1>& m_buf;
- const size_t& rng , GRange, tileSize;
- const int &c;
- memsetCghFunctor(cl::sycl::buffer<uint8_t, 1>& buff, const size_t& rng_, const size_t& GRange_, const size_t& tileSize_, const int& c_)
- :m_buf(buff), rng(rng_), GRange(GRange_), tileSize(tileSize_), c(c_){}
-
- void operator()(cl::sycl::handler &cgh) const {
- auto buf_acc = m_buf.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
- cgh.parallel_for(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), memsetkernelFunctor(buf_acc, rng, c));
- }
- };
+ /// returning the sycl queue
+ EIGEN_STRONG_INLINE cl::sycl::queue& sycl_queue() const { return m_queue_stream->sycl_queue();}
EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
// FIXME
@@ -367,39 +493,32 @@ struct SyclDevice {
EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
// We won't try to take advantage of the l2 cache for the time being, and
- // there is no l3 cache on cuda devices.
+ // there is no l3 cache on sycl devices.
return firstLevelCacheSize();
}
EIGEN_STRONG_INLINE unsigned long getNumSyclMultiProcessors() const {
- return sycl_queue().get_device(). template get_info<cl::sycl::info::device::max_compute_units>();
- // return stream_->deviceProperties().multiProcessorCount;
+ return m_queue_stream->getNumSyclMultiProcessors();
}
EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerBlock() const {
- return sycl_queue().get_device(). template get_info<cl::sycl::info::device::max_work_group_size>();
-
- // return stream_->deviceProperties().maxThreadsPerBlock;
+ return m_queue_stream->maxSyclThreadsPerBlock();
}
EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerMultiProcessor() const {
    // OpenCL does not have such a concept.
- return 2;//sycl_queue().get_device(). template get_info<cl::sycl::info::device::max_work_group_size>();
+ return m_queue_stream->maxSyclThreadsPerMultiProcessor();
// return stream_->deviceProperties().maxThreadsPerMultiProcessor;
}
EIGEN_STRONG_INLINE size_t sharedMemPerBlock() const {
- return sycl_queue().get_device(). template get_info<cl::sycl::info::device::local_mem_size>();
- // return stream_->deviceProperties().sharedMemPerBlock;
+ return m_queue_stream->sharedMemPerBlock();
}
  /// Not needed for sycl; it should act the same as the CPU version.
- EIGEN_STRONG_INLINE int majorDeviceVersion() const { return 1; }
+ EIGEN_STRONG_INLINE int majorDeviceVersion() const { return m_queue_stream->majorDeviceVersion(); }
EIGEN_STRONG_INLINE void synchronize() const {
- sycl_queue().wait_and_throw(); //pass
+ m_queue_stream->synchronize(); //pass
}
EIGEN_STRONG_INLINE void asynchronousExec() const {
- ///FIXEDME:: currently there is a race condition regarding the asynch scheduler.
- //sycl_queue().throw_asynchronous();// does not pass. Temporarily disabled
- sycl_queue().wait_and_throw(); //pass
-
+ m_queue_stream->asynchronousExec();
}
// This function checks if the runtime recorded an error for the
// underlying stream device.
@@ -407,8 +526,10 @@ struct SyclDevice {
return m_queue_stream->ok();
}
};
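Putting the two classes together, the intended construction pattern is a caller-owned QueueInterface wrapped by a SyclDevice (the deallocate call assumes the counterpart of allocate that falls in the elided hunk above):

// Sketch: the caller owns the QueueInterface; SyclDevice merely wraps it.
Eigen::QueueInterface queue_interface(cl::sycl::default_selector());
Eigen::SyclDevice sycl_device(&queue_interface);
void* p = sycl_device.allocate(256);
sycl_device.memset(p, 0, 256);
sycl_device.deallocate(p); // assumption: counterpart to allocate, elided from this diff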
-
-
+// This is used as a distinguishable device inside the kernel, as the sycl device class is not standard layout.
+// This is internal and must not be used by users. This dummy device allows us to specialise the tensor evaluator
+// inside the kernel, so we can have two types of eval, for host and device. This is required for the TensorArgMax operation.
+struct SyclKernelDevice:DefaultDevice{};
} // end namespace Eigen