From d57430dd73ab2f88aa5e45c370f6ab91103ff18a Mon Sep 17 00:00:00 2001
From: Mehdi Goli
Date: Tue, 8 Nov 2016 17:08:02 +0000
Subject: Converting all sycl buffers to uninitialised device-only buffers;
 adding memcpyHostToDevice and memcpyDeviceToHost on syclDevice; modifying all
 examples to obey the new rules; moving sycl queue creation to the device,
 per Benoit's suggestion; removing the sycl-specific condition for returning
 m_result in TensorReduction.h, per Benoit's suggestion.

---
 unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
index 1c89132db..3daecb045 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
@@ -27,9 +27,9 @@ namespace internal {
 
 template struct syclGenericBufferReducer{
 template
-static void run(BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
+static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
   do {
-          auto f = [length, local, &bufOut, &bufI](cl::sycl::handler& h) mutable {
+          auto f = [length, local, bufOut, &bufI](cl::sycl::handler& h) mutable {
             cl::sycl::nd_range<1> r{cl::sycl::range<1>{std::max(length, local)},
                                     cl::sycl::range<1>{std::min(length, local)}};
             /* Two accessors are used: one to the buffer that is being reduced,
@@ -37,7 +37,7 @@ static void run(BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDevice& de
             auto aI =
                 bufI.template get_access(h);
             auto aOut =
-                bufOut.template get_access(h);
+                bufOut->template get_access(h);
             cl::sycl::accessor
                 scratch(cl::sycl::range<1>(local), h);
 
@@ -134,7 +134,7 @@ struct FullReducer {
   /// if the shared memory is less than the GRange, we set shared_mem size to the TotalSize and in this case one kernel would be created for recursion to reduce all to one.
   if (GRange < outTileSize) outTileSize=GRange;
   // getting final out buffer at the moment the created buffer is true because there is no need for assign
-  auto out_buffer =dev.template get_sycl_buffer::type>(self.dimensions().TotalSize(), output);
+  auto out_buffer =dev.template get_sycl_buffer::type>(self.dimensions().TotalSize(), output);
   /// creating the shared memory for calculating reduction.
   /// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can
   /// recursively apply reduction on it in order to reduce the whole.
@@ -208,7 +208,7 @@ struct InnerReducer {
 
     dev.m_queue.submit([&](cl::sycl::handler &cgh) {
       // create a tuple of accessors from Evaluator
      auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
-      auto output_accessor = dev.template get_sycl_accessor(num_coeffs_to_preserve,cgh, output);
+      auto output_accessor = dev.template get_sycl_accessor(num_coeffs_to_preserve,cgh, output);
       cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
         typedef typename TensorSycl::internal::ConvertToDeviceExpression::Type DevExpr;
-- 
cgit v1.2.3
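
The sketch below reconstructs, from the commit message alone, the usage pattern the examples are being moved to: the SyclDevice is built from a device selector and owns its queue, device buffers start out uninitialised, and all host/device traffic goes through the new memcpyHostToDevice/memcpyDeviceToHost members. It is an illustration, not code from this patch; the allocate()/deallocate() calls, the exact memcpy signatures, and all variable names are assumptions.

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>

// Hedged sketch of the post-commit API; see the caveats above.
void scale_on_device() {
  cl::sycl::gpu_selector selector;
  Eigen::SyclDevice sycl_device(selector);  // the device now creates the sycl queue itself

  Eigen::array<int, 1> range{{64}};
  Eigen::Tensor<float, 1> in(range), out(range);
  in.setRandom();

  // Device-only, uninitialised allocations: nothing is copied implicitly any more.
  float* d_in  = static_cast<float*>(sycl_device.allocate(in.size() * sizeof(float)));
  float* d_out = static_cast<float*>(sycl_device.allocate(out.size() * sizeof(float)));
  Eigen::TensorMap<Eigen::Tensor<float, 1>> gpu_in(d_in, range);
  Eigen::TensorMap<Eigen::Tensor<float, 1>> gpu_out(d_out, range);

  // Explicit transfers replace the old implicit host/device synchronisation.
  sycl_device.memcpyHostToDevice(d_in, in.data(), in.size() * sizeof(float));
  gpu_out.device(sycl_device) = gpu_in * 2.0f;  // expression evaluated on the device's queue
  sycl_device.memcpyDeviceToHost(out.data(), d_out, out.size() * sizeof(float));

  sycl_device.deallocate(d_in);
  sycl_device.deallocate(d_out);
}

Taking bufOut by pointer in syclGenericBufferReducer::run, and capturing it by value in the command-group lambda, fits the same model: the output buffer is now held by the device rather than being a caller-owned temporary, so the asynchronously submitted command group keeps a stable pointer instead of a reference that could dangle. That reading is inferred from the hunks above, not stated in the commit message.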