author    Mehdi Goli <mehdi.goli@codeplay.com>  2016-11-10 18:45:12 +0000
committer Mehdi Goli <mehdi.goli@codeplay.com>  2016-11-10 18:45:12 +0000
commit    2e704d4257f235dd1f3224cd590e4fca4e3aaf96 (patch)
tree      e56e7d5b886830cde7cf6fa90c448880fb578665 /unsupported/Eigen/CXX11
parent    75c080b1762b8b83f6c2bb7baf95478a049b45d4 (diff)
Adding memset; optimising memcpyDeviceToHost by removing double copying;
Diffstat (limited to 'unsupported/Eigen/CXX11')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h    | 72
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h | 13
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h       | 12
3 files changed, 65 insertions, 32 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
index 7c039890e..e767d8965 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
@@ -72,9 +72,14 @@ struct SyclDevice {
template<typename T> inline std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> add_sycl_buffer(const T *ptr, size_t num_bytes) const {
using Type = cl::sycl::buffer<T, 1>;
- std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> ret = buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(new Type(cl::sycl::range<1>(num_bytes)),
- [](void *dataMem) { delete static_cast<Type*>(dataMem); })));
- (static_cast<Type*>(buffer_map.at(ptr).get()))->set_final_data(nullptr);
+ std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> ret;
+ if(ptr!=nullptr){
+ ret= buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(new Type(cl::sycl::range<1>(num_bytes)),
+ [](void *dataMem) { delete static_cast<Type*>(dataMem); })));
+ (static_cast<Type*>(ret.first->second.get()))->set_final_data(nullptr);
+ } else {
+    eigen_assert(false && "The device memory is not allocated; call allocate() on the device before registering a buffer.");
+ }
return ret;
}
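
The add_sycl_buffer change above registers a host pointer against a type-erased SYCL buffer held in a shared_ptr<void>. A stand-alone sketch of that idiom (not Eigen code; the registry name and signature are assumptions, and the buffer range is sized from num_bytes exactly as the patch writes it):

    #include <CL/sycl.hpp>
    #include <cstddef>
    #include <map>
    #include <memory>

    // A host pointer maps to a type-erased cl::sycl::buffer. The custom
    // deleter restores the concrete buffer type so destruction is well
    // defined, and set_final_data(nullptr) stops SYCL from writing back
    // to the host pointer when the buffer is destroyed.
    template <typename T>
    void register_buffer(std::map<const void*, std::shared_ptr<void>>& registry,
                         const T* ptr, size_t num_bytes) {
      using Buf = cl::sycl::buffer<T, 1>;
      auto ret = registry.insert(std::make_pair(
          static_cast<const void*>(ptr),
          std::shared_ptr<void>(new Buf(cl::sycl::range<1>(num_bytes)),
                                [](void* b) { delete static_cast<Buf*>(b); })));
      // Configure the stored buffer (newly inserted or already present).
      static_cast<Buf*>(ret.first->second.get())->set_final_data(nullptr);
    }
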
@@ -83,36 +88,77 @@ struct SyclDevice {
}
/// allocating memory on the cpu
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void *allocate(size_t) const {
+ void *allocate(size_t) const {
return internal::aligned_malloc(8);
}
// some runtime conditions that can be applied here
bool isDeviceSuitable() const { return true; }
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
+ void memcpy(void *dst, const void *src, size_t n) const {
::memcpy(dst, src, n);
}
- template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
+ template<typename T> void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(dst, n).first->second.get()))-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
memcpy(host_acc.get_pointer(), src, n);
}
- /// whith the current implementation of sycl, the data is copied twice from device to host. This will be fixed soon.
- template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
+
+ inline void parallel_for_setup(size_t n, size_t &tileSize, size_t &rng, size_t &GRange) const {
+ tileSize =m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
+ rng = n;
+ if (rng==0) rng=1;
+ GRange=rng;
+ if (tileSize>GRange) tileSize=GRange;
+ else if(GRange>tileSize){
+ size_t xMode = GRange % tileSize;
+ if (xMode != 0) GRange += (tileSize - xMode);
+ }
+ }
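
parallel_for_setup centralises the tile-size computation that the other two files previously duplicated: it halves the device's max work-group size, clamps it for small problems, and rounds the global range up to a multiple of the tile. A host-testable sketch of the same arithmetic, assuming a max_work_group_size of 512; with n = 1000 it yields tileSize = 256 and GRange = 1024, which is why every kernel below guards with `if (globalid < ...)`:

    #include <cassert>
    #include <cstddef>

    // Same round-up logic as the member function above, extracted so it can
    // be unit-tested on the host. max_wg is a stand-in for the value of
    // get_info<cl::sycl::info::device::max_work_group_size>().
    void parallel_for_setup(size_t n, size_t& tileSize, size_t& rng, size_t& GRange) {
      size_t max_wg = 512;
      tileSize = max_wg / 2;                       // 256
      rng = (n == 0) ? 1 : n;
      GRange = rng;
      if (tileSize > GRange) {
        tileSize = GRange;                          // small problem: one undersized group
      } else {
        size_t rem = GRange % tileSize;
        if (rem != 0) GRange += tileSize - rem;     // round up to a tile multiple
      }
    }

    int main() {
      size_t tile, rng, grange;
      parallel_for_setup(1000, tile, rng, grange);
      assert(tile == 256 && rng == 1000 && grange == 1024);
      return 0;
    }
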
+
+ template<typename T> void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
auto it = buffer_map.find(src);
if (it != buffer_map.end()) {
- auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(it->second.get()))-> template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::host_buffer>();
- memcpy(dst,host_acc.get_pointer(), n);
+ size_t rng, GRange, tileSize;
+ parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
+
+ auto dest_buf = cl::sycl::buffer<T, 1, cl::sycl::map_allocator<T>>(dst, cl::sycl::range<1>(rng));
+ typedef decltype(dest_buf) SYCLDTOH;
+ m_queue.submit([&](cl::sycl::handler &cgh) {
+ auto src_acc= (static_cast<cl::sycl::buffer<T, 1>*>(it->second.get()))-> template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
+ auto dst_acc =dest_buf.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
+ cgh.parallel_for<SYCLDTOH>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
+ auto globalid=itemID.get_global_linear_id();
+ if (globalid< dst_acc.get_size()) {
+ dst_acc[globalid] = src_acc[globalid];
+ }
+ });
+ });
+ m_queue.throw_asynchronous();
+
} else{
eigen_assert("no device memory found. The memory might be destroyed before creation");
}
}
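
The old memcpyDeviceToHost read the device buffer through a host accessor and then memcpy'd into dst, copying the data twice. The new version wraps dst in a buffer using cl::sycl::map_allocator (the allocator the patch itself uses) so the runtime can map the existing host allocation, and a device kernel writes straight into it. A minimal sketch of that pattern with an illustrative name and signature:

    #include <CL/sycl.hpp>
    #include <cstddef>

    // Single-copy device-to-host transfer: the destination pointer is wrapped
    // in a map_allocator buffer and filled by a device kernel. KernelName is a
    // tag type used to name the kernel, as the patch does with SYCLDTOH.
    template <typename KernelName, typename T>
    void copy_device_to_host(cl::sycl::queue& q, cl::sycl::buffer<T, 1>& src_buf,
                             T* dst, size_t nelems) {
      cl::sycl::buffer<T, 1, cl::sycl::map_allocator<T>> dst_buf(
          dst, cl::sycl::range<1>(nelems));
      q.submit([&](cl::sycl::handler& cgh) {
        auto src = src_buf.template get_access<cl::sycl::access::mode::read>(cgh);
        auto out = dst_buf.template get_access<cl::sycl::access::mode::discard_write>(cgh);
        cgh.parallel_for<KernelName>(cl::sycl::range<1>(nelems),
                                     [=](cl::sycl::id<1> i) { out[i] = src[i]; });
      });
      q.wait_and_throw();  // dst is synchronized once the mapped buffer
                           // is released (here, when dst_buf is destroyed)
    }
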
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, size_t n) const {
- ::memset(buffer, c, n);
+ template<typename T> void memset(T *buff, int c, size_t n) const {
+
+ size_t rng, GRange, tileSize;
+ parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
+ m_queue.submit([&](cl::sycl::handler &cgh) {
+ auto buf_acc =(static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(buff, n).first->second.get()))-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
+ cgh.parallel_for<SyclDevice>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
+ auto globalid=itemID.get_global_linear_id();
+ auto buf_ptr= reinterpret_cast<typename cl::sycl::global_ptr<unsigned char>::pointer_t>((&(*buf_acc.get_pointer())));
+ if (globalid< buf_acc.get_size()) {
+ for(size_t i=0; i<sizeof(T); i++)
+ buf_ptr[globalid*sizeof(T) + i] = c;
+ }
+ });
+ });
+ m_queue.throw_asynchronous();
}
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
+ int majorDeviceVersion() const {
return 1;
}
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
index 3daecb045..db23bd7b0 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
@@ -188,15 +188,8 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
typedef const typename Self::ChildType HostExpr; /// this is the child of reduction
typedef typename TensorSycl::internal::createPlaceHolderExpression<HostExpr>::Type PlaceHolderExpr;
auto functors = TensorSycl::internal::extractFunctors(self.impl());
-
- size_t tileSize =dev.m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
-
- size_t GRange=num_coeffs_to_preserve;
- if (tileSize>GRange) tileSize=GRange;
- else if(GRange>tileSize){
- size_t xMode = GRange % tileSize;
- if (xMode != 0) GRange += (tileSize - xMode);
- }
+ size_t range, GRange, tileSize;
+ dev.parallel_for_setup(num_coeffs_to_preserve, tileSize, range, GRange);
// getting final out buffer at the moment the created buffer is true because there is no need for assign
/// creating the shared memory for calculating reduction.
/// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can
@@ -223,7 +216,7 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
/// const cast added as a naive solution to solve the qualifier drop error
auto globalid=itemID.get_global_linear_id();
- if (globalid< static_cast<size_t>(num_coeffs_to_preserve)) {
+ if (globalid< range) {
typename DeiceSelf::CoeffReturnType accum = functor.initialize();
GenericDimReducer<DeiceSelf::NumReducedDims-1, DeiceSelf, Op>::reduce(device_self_evaluator, device_self_evaluator.firstInput(globalid),const_cast<Op&>(functor), &accum);
functor.finalize(accum);
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
index 7914b6fad..724eebd83 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h
@@ -37,18 +37,12 @@ void run(Expr &expr, Dev &dev) {
typedef typename internal::createPlaceHolderExpression<Expr>::Type PlaceHolderExpr;
auto functors = internal::extractFunctors(evaluator);
- size_t tileSize =dev.m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
dev.m_queue.submit([&](cl::sycl::handler &cgh) {
-
// create a tuple of accessors from Evaluator
auto tuple_of_accessors = internal::createTupleOfAccessors<decltype(evaluator)>(cgh, evaluator);
- const auto range = utility::tuple::get<0>(tuple_of_accessors).get_range()[0];
- size_t GRange=range;
- if (tileSize>GRange) tileSize=GRange;
- else if(GRange>tileSize){
- size_t xMode = GRange % tileSize;
- if (xMode != 0) GRange += (tileSize - xMode);
- }
+ size_t range, GRange, tileSize;
+ dev.parallel_for_setup(utility::tuple::get<0>(tuple_of_accessors).get_range()[0], tileSize, range, GRange);
+
// run the kernel
cgh.parallel_for<PlaceHolderExpr>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
typedef typename internal::ConvertToDeviceExpression<Expr>::Type DevExpr;