path: root/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
author    Mehdi Goli <mehdi.goli@codeplay.com>  2017-01-20 18:23:20 +0000
committer Mehdi Goli <mehdi.goli@codeplay.com>  2017-01-20 18:23:20 +0000
commit    602f8c27f5307f1da966df2fc26745ecd0e78fc9 (patch)
tree      acbdd924d558a40448991791a601fe563a5f1b51 /unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
parent    77cc4d06c746e7be2966bd0d09b55c2393e289d8 (diff)
Reverting to the previous TensorDeviceSycl.h, as the total number of buffers is not enough for TensorFlow.
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h | 83
1 file changed, 41 insertions(+), 42 deletions(-)
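Context for the patch below: the revert drops the codeplay::legacy pointer mapper and goes back to a per-QueueInterface std::map that owns one cl::sycl::buffer per allocation, keyed by the host pointer that allocate() hands out. The following standalone sketch models only that bookkeeping, using plain malloc'd blocks and byte sizes in place of real SYCL buffers so it compiles without a SYCL runtime; PointerRegistry, register_block and release_block are illustrative names that do not appear in the patch.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <map>
#include <mutex>

// Toy stand-in for QueueInterface::buffer_map: one entry per allocation, keyed
// by the pointer returned to the caller. A real entry owns a
// cl::sycl::buffer<uint8_t, 1>; here a byte size is enough to show the idea.
class PointerRegistry {
 public:
  const std::uint8_t* register_block(std::size_t num_bytes) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto* p = static_cast<std::uint8_t*>(std::malloc(num_bytes));
    map_.emplace(p, num_bytes);
    return p;
  }
  void release_block(const void* p) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = map_.find(static_cast<const std::uint8_t*>(p));
    if (it != map_.end()) {
      std::free(const_cast<std::uint8_t*>(it->first));
      map_.erase(it);
    }
  }
  std::size_t live_allocations() const {
    std::lock_guard<std::mutex> lock(mutex_);
    return map_.size();
  }

 private:
  mutable std::mutex mutex_;
  std::map<const std::uint8_t*, std::size_t> map_;
};

int main() {
  PointerRegistry registry;
  const std::uint8_t* a = registry.register_block(256);
  const std::uint8_t* b = registry.register_block(1024);
  std::cout << registry.live_allocations() << " live allocations\n";  // 2
  registry.release_block(a);
  registry.release_block(b);
  std::cout << registry.live_allocations() << " live allocations\n";  // 0
}
```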
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
index a30090714..722a5d894 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
@@ -15,16 +15,13 @@
#if defined(EIGEN_USE_SYCL) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H)
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
-#include "TensorSyclLegacyPointer.h"
-
namespace Eigen {
#define ConvertToActualTypeSycl(Scalar, buf_acc) reinterpret_cast<typename cl::sycl::global_ptr<Scalar>::pointer_t>((&(*buf_acc.get_pointer())))
template <typename Scalar, typename read_accessor, typename write_accessor> class MemCopyFunctor {
public:
- MemCopyFunctor(read_accessor src_acc, write_accessor dst_acc, size_t rng, size_t i, size_t offset)
- : m_src_acc(src_acc), m_dst_acc(dst_acc), m_rng(rng), m_i(i), m_offset(offset) {}
+ MemCopyFunctor(read_accessor src_acc, write_accessor dst_acc, size_t rng, size_t i, size_t offset) : m_src_acc(src_acc), m_dst_acc(dst_acc), m_rng(rng), m_i(i), m_offset(offset) {}
void operator()(cl::sycl::nd_item<1> itemID) {
auto src_ptr = ConvertToActualTypeSycl(Scalar, m_src_acc);
@@ -55,7 +52,6 @@ namespace Eigen {
};
-
EIGEN_STRONG_INLINE auto get_sycl_supported_devices()->decltype(cl::sycl::device::get_devices()){
auto devices = cl::sycl::device::get_devices();
std::vector<cl::sycl::device>::iterator it =devices.begin();
@@ -78,10 +74,11 @@ struct QueueInterface {
bool exception_caught_ = false;
mutable std::mutex mutex_;
+
/// std::map is the container used to make sure that we create only one buffer
/// per pointer. The lifespan of the buffer now depends on the lifespan of SyclDevice.
/// If a non-read-only pointer is needed to be accessed on the host we should manually deallocate it.
- //mutable std::map<const uint8_t *, cl::sycl::buffer<uint8_t, 1>> buffer_map;
+ mutable std::map<const uint8_t *, cl::sycl::buffer<uint8_t, 1>> buffer_map;
/// sycl queue
mutable cl::sycl::queue m_queue;
/// creating device by using cl::sycl::selector or cl::sycl::device both are the same and can be captured through dev_Selector typename
@@ -119,42 +116,45 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
/// use this pointer as a key in our buffer_map and we make sure that we dedicate only one buffer only for this pointer.
/// The device pointer would be deleted by calling deallocate function.
EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
+ auto buf = cl::sycl::buffer<uint8_t,1>(cl::sycl::range<1>(num_bytes));
+ auto ptr =buf.get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>().get_pointer();
+ buf.set_final_data(nullptr);
std::lock_guard<std::mutex> lock(mutex_);
- return codeplay::legacy::malloc(num_bytes);
+ buffer_map.insert(std::pair<const uint8_t *, cl::sycl::buffer<uint8_t, 1>>(static_cast<const uint8_t*>(ptr),buf));
+ return static_cast<void*>(ptr);
}
/// This is used to deallocate the device pointer. p is used as a key inside
/// the map to find the device buffer and delete it.
EIGEN_STRONG_INLINE void deallocate(void *p) const {
std::lock_guard<std::mutex> lock(mutex_);
- return codeplay::legacy::free(p);
+ auto it = buffer_map.find(static_cast<const uint8_t*>(p));
+ if (it != buffer_map.end()) {
+ buffer_map.erase(it);
+ }
}
EIGEN_STRONG_INLINE void deallocate_all() const {
std::lock_guard<std::mutex> lock(mutex_);
- codeplay::legacy::clear();
+ buffer_map.clear();
}
- EIGEN_STRONG_INLINE codeplay::legacy::PointerMapper& pointerMapper() const {
+ EIGEN_STRONG_INLINE std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1>>::iterator find_buffer(const void* ptr) const {
std::lock_guard<std::mutex> lock(mutex_);
- return codeplay::legacy::getPointerMapper();
- }
-
- EIGEN_STRONG_INLINE cl::sycl::buffer<uint8_t,1> get_buffer(void* ptr) const {
- std::lock_guard<std::mutex> lock(mutex_);
- return pointerMapper().get_buffer(pointerMapper().get_buffer_id(ptr));
- }
-
- EIGEN_STRONG_INLINE size_t get_buffer_offset(void* ptr) const {
- std::lock_guard<std::mutex> lock(mutex_);
- return pointerMapper().get_offset(ptr);
+ auto it1 = buffer_map.find(static_cast<const uint8_t*>(ptr));
+ if (it1 != buffer_map.end()){
+ return it1;
+ }
+ else{
+ for(std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1>>::iterator it=buffer_map.begin(); it!=buffer_map.end(); ++it){
+ auto size = it->second.get_size();
+ if((it->first < (static_cast<const uint8_t*>(ptr))) && ((static_cast<const uint8_t*>(ptr)) < (it->first + size)) ) return it;
+ }
+ }
+ std::cerr << "No sycl buffer found. Make sure that you have allocated memory for your buffer by calling malloc-ed function."<< std::endl;
+ abort();
}
- /*EIGEN_STRONG_INLINE void* get_buffer_id(void* ptr) const {
- std::lock_guard<std::mutex> lock(mutex_);
- return static_cast<void*>(pointerMapper().get_buffer_id(ptr));
- }*/
-
// This function checks if the runtime recorded an error for the
// underlying stream device.
EIGEN_STRONG_INLINE bool ok() const {
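A note on the restored find_buffer shown in this hunk: expression evaluators may hand the device a pointer into the middle of an allocation rather than its start, so the lookup first tries an exact key match and then falls back to scanning for the entry whose [base, base + size) range contains the pointer. A small standalone model of that two-step lookup, again with byte sizes standing in for SYCL buffers (find_owning_block is a hypothetical name):

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>

using BlockMap = std::map<const std::uint8_t*, std::size_t>;

// Mirrors QueueInterface::find_buffer: exact key match first, then a linear
// scan for the block whose byte range contains ptr.
BlockMap::const_iterator find_owning_block(const BlockMap& blocks, const void* ptr) {
  const auto* p = static_cast<const std::uint8_t*>(ptr);
  auto it = blocks.find(p);
  if (it != blocks.end()) return it;
  for (it = blocks.begin(); it != blocks.end(); ++it) {
    if (it->first < p && p < it->first + it->second) return it;
  }
  return blocks.end();  // the real code prints an error and aborts at this point
}

int main() {
  static std::uint8_t storage[128];
  BlockMap blocks{{storage, sizeof(storage)}};
  const std::uint8_t* interior = storage + 40;  // not a key in the map
  auto hit = find_owning_block(blocks, interior);
  if (hit != blocks.end())
    std::cout << "owning block found, byte offset " << (interior - hit->first) << "\n";  // 40
}
```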
@@ -165,7 +165,7 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
}
// destructor
- ~QueueInterface() { codeplay::legacy::clear(); }
+ ~QueueInterface() { buffer_map.clear(); }
};
struct SyclDevice {
@@ -183,11 +183,10 @@ struct SyclDevice {
}
/// Accessing the created sycl device buffer for the device pointer
- EIGEN_STRONG_INLINE cl::sycl::buffer<uint8_t, 1> get_sycl_buffer(const void * ptr) const {
- return m_queue_stream->get_buffer(const_cast<void*>(ptr));
+ EIGEN_STRONG_INLINE cl::sycl::buffer<uint8_t, 1>& get_sycl_buffer(const void * ptr) const {
+ return m_queue_stream->find_buffer(ptr)->second;
}
-
/// This is used to prepare the number of threads and also the number of threads per block for sycl kernels
template<typename Index>
EIGEN_STRONG_INLINE void parallel_for_setup(Index n, Index &tileSize, Index &rng, Index &GRange) const {
@@ -274,8 +273,6 @@ struct SyclDevice {
if (xMode != 0) GRange0 += static_cast<Index>(tileSize0 - xMode);
}
}
-
-
/// allocate device memory
EIGEN_STRONG_INLINE void *allocate(size_t num_bytes) const {
return m_queue_stream->allocate(num_bytes);
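The parallel_for_setup context at the top of this hunk (tileSize0, GRange0, xMode) is just rounding the global range up to the next multiple of the tile size, so that the nd_range launched later consists of full work-groups. A minimal standalone illustration of that adjustment (round_up_to_tile is a made-up name, not in the file):

```cpp
#include <cstddef>
#include <iostream>

// Round a global range up to the next multiple of the tile (work-group) size,
// matching the "if (xMode != 0) GRange0 += tileSize0 - xMode;" pattern in
// parallel_for_setup.
std::size_t round_up_to_tile(std::size_t range, std::size_t tile_size) {
  const std::size_t mode = range % tile_size;
  return mode == 0 ? range : range + (tile_size - mode);
}

int main() {
  std::cout << round_up_to_tile(1000, 256) << "\n";  // 1024: four full tiles of 256
  std::cout << round_up_to_tile(1024, 256) << "\n";  // 1024: already a multiple
}
```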
@@ -290,15 +287,17 @@ struct SyclDevice {
/// the memcpy function
template<typename Index> EIGEN_STRONG_INLINE void memcpy(void *dst, const Index *src, size_t n) const {
- auto offset= m_queue_stream->get_buffer_offset((void*)src);
- auto i= m_queue_stream->get_buffer_offset(dst);
+ auto it1 = m_queue_stream->find_buffer((void*)src);
+ auto it2 = m_queue_stream->find_buffer(dst);
+ auto offset= (static_cast<const uint8_t*>(static_cast<const void*>(src))) - it1->first;
+ auto i= (static_cast<const uint8_t*>(dst)) - it2->first;
offset/=sizeof(Index);
i/=sizeof(Index);
size_t rng, GRange, tileSize;
parallel_for_setup(n/sizeof(Index), tileSize, rng, GRange);
sycl_queue().submit([&](cl::sycl::handler &cgh) {
- auto src_acc =get_sycl_accessor<cl::sycl::access::mode::read>(cgh, src);
- auto dst_acc =get_sycl_accessor<cl::sycl::access::mode::write>(cgh, dst);
+ auto src_acc =it1->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
+ auto dst_acc =it2->second.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
typedef decltype(src_acc) read_accessor;
typedef decltype(dst_acc) write_accessor;
cgh.parallel_for(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<Index, read_accessor, write_accessor>(src_acc, dst_acc, rng, i, offset));
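In the rewritten memcpy above, offset and i start out as byte distances between the raw pointers and the base pointers of their owning buffers, and are divided by sizeof(Index) to become element offsets before being passed to MemCopyFunctor. A small standalone check of that arithmetic (element_offset is a hypothetical helper, not part of the patch):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Byte distance of ptr from the start of its owning block, expressed in
// elements of type Index: the same conversion memcpy performs before
// launching MemCopyFunctor.
template <typename Index>
std::size_t element_offset(const void* block_base, const Index* ptr) {
  const auto* base = static_cast<const std::uint8_t*>(block_base);
  const auto* p = static_cast<const std::uint8_t*>(static_cast<const void*>(ptr));
  return static_cast<std::size_t>(p - base) / sizeof(Index);
}

int main() {
  static std::int64_t block[32];            // stand-in for a device allocation
  const std::int64_t* src = block + 5;      // interior pointer handed to memcpy
  assert(element_offset(block, src) == 5);  // 40 bytes / sizeof(int64_t) == 5
  return 0;
}
```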
@@ -311,11 +310,10 @@ struct SyclDevice {
/// on it. Using a discard_write accessor guarantees that we do not bring back the current value of the
/// buffer to host. Then we use the memcpy to copy the data to the host accessor. The first time that
/// this buffer is accessed, the data will be copied to the device.
- template<typename T> EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
+ template<typename Index> EIGEN_STRONG_INLINE void memcpyHostToDevice(Index *dst, const Index *src, size_t n) const {
auto host_acc= get_sycl_buffer(dst). template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
::memcpy(host_acc.get_pointer(), src, n);
}
-
/// The memcpyDeviceToHost is used to copy the data from host to device. Here, in order to avoid double copying the data. We create a sycl
/// buffer with map_allocator for the destination pointer with a discard_write accessor on it. The lifespan of the buffer is bound to the
/// lifespan of the memcpyDeviceToHost function. We create a kernel to copy the data, from the device- only source buffer to the destination
@@ -323,14 +321,15 @@ struct SyclDevice {
/// would be available on the dst pointer using fast copy technique (map_allocator). In this case we can make sure that we copy the data back
/// to the cpu only once per function call.
template<typename Index> EIGEN_STRONG_INLINE void memcpyDeviceToHost(void *dst, const Index *src, size_t n) const {
- auto offset =m_queue_stream->get_buffer_offset((void *)src);
+ auto it = m_queue_stream->find_buffer(src);
+ auto offset =static_cast<const uint8_t*>(static_cast<const void*>(src))- it->first;
offset/=sizeof(Index);
size_t rng, GRange, tileSize;
parallel_for_setup(n/sizeof(Index), tileSize, rng, GRange);
// Assuming that the dst is the start of the destination pointer
auto dest_buf = cl::sycl::buffer<uint8_t, 1, cl::sycl::map_allocator<uint8_t> >(static_cast<uint8_t*>(dst), cl::sycl::range<1>(n));
sycl_queue().submit([&](cl::sycl::handler &cgh) {
- auto src_acc= get_sycl_accessor<cl::sycl::access::mode::read>(cgh, src);
+ auto src_acc= it->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
auto dst_acc =dest_buf.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
typedef decltype(src_acc) read_accessor;
typedef decltype(dst_acc) write_accessor;
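The destination buffer in the hunk above is built with cl::sycl::map_allocator directly over the caller's dst pointer, so the result lands in host memory when that buffer is destroyed instead of going through an extra staging copy. A simplified sketch of the same pattern outside of Eigen, assuming a SYCL 1.2.x implementation such as ComputeCpp; device_to_host_copy and the kernel name CopyBack are invented for the example:

```cpp
#include <CL/sycl.hpp>
#include <cstddef>
#include <cstdint>

class CopyBack;  // kernel name

// Copy n bytes from a device-side source buffer into caller-owned host memory
// through a map_allocator-backed destination buffer. The write-back to dst
// happens when dest_buf goes out of scope at the end of the function.
void device_to_host_copy(cl::sycl::queue& q, cl::sycl::buffer<std::uint8_t, 1>& src,
                         void* dst, std::size_t n) {
  cl::sycl::buffer<std::uint8_t, 1, cl::sycl::map_allocator<std::uint8_t>> dest_buf(
      static_cast<std::uint8_t*>(dst), cl::sycl::range<1>(n));
  q.submit([&](cl::sycl::handler& cgh) {
    auto s = src.get_access<cl::sycl::access::mode::read>(cgh);
    auto d = dest_buf.get_access<cl::sycl::access::mode::discard_write>(cgh);
    cgh.parallel_for<CopyBack>(cl::sycl::range<1>(n),
                               [=](cl::sycl::id<1> i) { d[i] = s[i]; });
  });
  q.wait();
}
```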
@@ -344,8 +343,7 @@ struct SyclDevice {
EIGEN_STRONG_INLINE void memset(void *data, int c, size_t n) const {
size_t rng, GRange, tileSize;
parallel_for_setup(n, tileSize, rng, GRange);
- auto buf =get_sycl_buffer(static_cast<uint8_t*>(static_cast<void*>(data)));
- sycl_queue().submit(memsetCghFunctor(buf,rng, GRange, tileSize, c ));
+ sycl_queue().submit(memsetCghFunctor(get_sycl_buffer(static_cast<uint8_t*>(static_cast<void*>(data))),rng, GRange, tileSize, c ));
synchronize();
}
@@ -411,6 +409,7 @@ struct SyclDevice {
};
+
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H