From 79aa2b784ecc26d6a8ef6fb2b2b053f4ad81593b Mon Sep 17 00:00:00 2001 From: Mehdi Goli Date: Thu, 1 Dec 2016 13:02:27 +0000 Subject: Adding sycl backend for TensorPadding.h; disabling __uint128 for sycl in TensorIntDiv.h; disabling cache size for sycl in TensorDeviceDefault.h; adding sycl backend for StrideSliceOp; removing sycl compiler warning for creating an array of size 0 in CXX11Meta.h; cleaning up the sycl backend code. --- .../Eigen/CXX11/src/Tensor/TensorReductionSycl.h | 25 ---------------------- 1 file changed, 25 deletions(-) (limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h') diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h index abb8420a6..48c5f9a47 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h @@ -200,9 +200,6 @@ struct InnerReducer { /// creating the shared memory for calculating reduction. /// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can /// recursively apply reduction on it in order to reduce the whole. - // Dims dims= self.xprDims(); //Op functor = reducer; - dev.parallel_for_setup(num_coeffs_to_preserve, tileSize, range, GRange); dev.sycl_queue().submit([&](cl::sycl::handler &cgh) { // create a tuple of accessors from Evaluator @@ -214,28 +211,6 @@ struct InnerReducer { TensorSycl::internal::ReductionFunctor (output_accessor, functors, tuple_of_accessors, self.xprDims(), reducer, range)); - - // [=](cl::sycl::nd_item<1> itemID) { // typedef typename TensorSycl::internal::ConvertToDeviceExpression::Type DevExpr; // auto device_expr = TensorSycl::internal::createDeviceExpression(functors, tuple_of_accessors); /// reduction cannot be captured automatically through our device conversion recursion. 
The reason is that reduction has two behaviour - /// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the - /// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here. - // const auto device_self_expr= TensorReductionOp(device_expr.expr, dims, functor); - /// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is - /// the device_evaluator is detectable and recognisable on the device. - // typedef Eigen::TensorEvaluator DeviceSelf; - // auto device_self_evaluator = Eigen::TensorEvaluator(device_self_expr, Eigen::DefaultDevice()); - // auto output_accessor_ptr =ConvertToActualTypeSycl(typename DeviceSelf::CoeffReturnType, output_accessor); - /// const cast added as a naive solution to solve the qualifier drop error - // auto globalid=itemID.get_global_linear_id(); - // if (globalid< range) { - // typename DeviceSelf::CoeffReturnType accum = functor.initialize(); - // GenericDimReducer::reduce(device_self_evaluator, device_self_evaluator.firstInput(static_cast(globalid)),const_cast(functor), &accum); - // functor.finalize(accum); - // output_accessor_ptr[globalid]= accum; - // } - // }); }); dev.synchronize(); return false; -- cgit v1.2.3