/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_KERNELS_SCATTER_FUNCTOR_H_
#define TENSORFLOW_CORE_KERNELS_SCATTER_FUNCTOR_H_

#include <type_traits>

#include "third_party/eigen3/Eigen/Core"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/kernels/bounds_check.h"
#include "tensorflow/core/kernels/dense_update_functor.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {

class OpKernelContext;
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
#ifdef TENSORFLOW_USE_SYCL
typedef Eigen::SyclDevice SYCLDevice;
#endif  // TENSORFLOW_USE_SYCL

namespace scatter_op {

enum class UpdateOp { ASSIGN, ADD, SUB, MUL, DIV, MIN, MAX };

namespace internal {

template <scatter_op::UpdateOp Op>
struct Assign {};
template <>
struct Assign<scatter_op::UpdateOp::ASSIGN> {
  template <typename Params, typename Update>
  static void Run(Params p, Update u) {
    p = u;
  }
  template <typename Params, typename Update>
  static void RunScalar(Params p, Update u) {
    p.setConstant(u);
  }
};
template <>
struct Assign<scatter_op::UpdateOp::ADD> {
  template <typename Params, typename Update>
  static void Run(Params p, Update u) {
    p += u;
  }
  template <typename Params, typename Update>
  static void RunScalar(Params p, Update u) {
    p = p + u;
  }
};
template <>
struct Assign<scatter_op::UpdateOp::SUB> {
  template <typename Params, typename Update>
  static void Run(Params p, Update u) {
    p -= u;
  }
  template <typename Params, typename Update>
  static void RunScalar(Params p, Update u) {
    p = p + static_cast<Update>(-u);
  }
};
template <>
struct Assign<scatter_op::UpdateOp::MUL> {
  template <typename Params, typename Update>
  static void Run(Params p, Update u) {
    p *= u;
  }
  template <typename Params, typename Update>
  static void RunScalar(Params p, Update u) {
    p = p * u;
  }
};
template <>
struct Assign<scatter_op::UpdateOp::DIV> {
  template <typename Params, typename Update>
  static void Run(Params p, Update u) {
    p /= u;
  }
  template <typename Params, typename Update>
  static void RunScalar(Params p, Update u) {
    p = p / u;
  }
};
template <>
struct Assign<scatter_op::UpdateOp::MIN> {
  // This method requires that Params and Update are tensor types.
  template <typename Params, typename Update>
  static void Run(Params p, Update u) {
    p = p.cwiseMin(u);
  }
  // Same thing, but for Update being a scalar type.
  template <typename Params, typename Update>
  static void RunScalar(Params p, Update u) {
    p = p.cwiseMin(u);
  }
};
template <>
struct Assign<scatter_op::UpdateOp::MAX> {
  template <typename Params, typename Update>
  static void Run(Params p, Update u) {
    p = p.cwiseMax(u);
  }
  template <typename Params, typename Update>
  static void RunScalar(Params p, Update u) {
    p = p.cwiseMax(u);
  }
};
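// Added commentary (not part of the original header): each Assign<Op>
// specialization above exposes two entry points.  Run() combines a params row
// with an updates row of the same shape (both passed as Eigen tensor "chips"),
// while RunScalar() combines a params row with a single scalar update that is
// broadcast across the row.  Roughly (`row` and `i` are illustrative names):
//
//   scatter_op::internal::Assign<scatter_op::UpdateOp::ADD>::Run(
//       params.template chip<0>(row), updates.template chip<0>(i));
//
// lowers to an element-wise += on that row.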
#ifdef TENSORFLOW_USE_SYCL
template <scatter_op::UpdateOp Op>
struct AssignSYCL {};
template <>
struct AssignSYCL<scatter_op::UpdateOp::ASSIGN> {
  template <typename Device, typename Params, typename Update>
  static void Run(Device d, Params p, Update u) {
    p.device(d) = u;
  }
};
template <>
struct AssignSYCL<scatter_op::UpdateOp::ADD> {
  template <typename Device, typename Params, typename Update>
  static void Run(Device d, Params p, Update u) {
    p.device(d) += u;
  }
};
template <>
struct AssignSYCL<scatter_op::UpdateOp::SUB> {
  template <typename Device, typename Params, typename Update>
  static void Run(Device d, Params p, Update u) {
    p.device(d) -= u;
  }
};
template <>
struct AssignSYCL<scatter_op::UpdateOp::MUL> {
  template <typename Device, typename Params, typename Update>
  static void Run(Device d, Params p, Update u) {
    p.device(d) = p * u;
  }
};
template <>
struct AssignSYCL<scatter_op::UpdateOp::DIV> {
  template <typename Device, typename Params, typename Update>
  static void Run(Device d, Params p, Update u) {
    p.device(d) = p / u;
  }
};
template <>
struct AssignSYCL<scatter_op::UpdateOp::MIN> {
  template <typename Device, typename Params, typename Update>
  static void Run(Device d, Params p, Update u) {
    p.device(d) = p.cwiseMin(u);
  }
};
template <>
struct AssignSYCL<scatter_op::UpdateOp::MAX> {
  template <typename Device, typename Params, typename Update>
  static void Run(Device d, Params p, Update u) {
    p.device(d) = p.cwiseMax(u);
  }
};
#endif  // TENSORFLOW_USE_SYCL

}  // namespace internal
}  // namespace scatter_op

namespace functor {

template <typename Device, typename T, typename Index, scatter_op::UpdateOp op>
struct ScatterFunctor {
  Index operator()(OpKernelContext* c, const Device& d,
                   typename TTypes<T>::Matrix params,
                   typename TTypes<T>::ConstMatrix updates,
                   typename TTypes<Index>::ConstFlat indices);
};

template <typename Device, typename T, typename Index, scatter_op::UpdateOp op>
struct ScatterFunctorBase {
  Index operator()(OpKernelContext* c, const Device& d,
                   typename TTypes<T>::Matrix params,
                   typename TTypes<T>::ConstMatrix updates,
                   typename TTypes<Index>::ConstFlat indices) {
    // indices and params sizes were validated in DoCompute().
    const Index N = static_cast<Index>(indices.size());
    const Index limit = static_cast<Index>(params.dimension(0));
    for (Index i = 0; i < N; i++) {
      // Grab the index and check its validity.  Do this carefully,
      // to avoid checking the value and grabbing it again from
      // memory a second time (a security risk since it may change in between).
      const Index index = ::tensorflow::internal::SubtleMustCopy(indices(i));
      if (!FastBoundsCheck(index, limit)) return i;
      // Copy last Ndim-1 dimensions of updates[i] to params[index]
      scatter_op::internal::Assign<op>::Run(params.template chip<0>(index),
                                            updates.template chip<0>(i));
    }
    return -1;
  }
};
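// Usage sketch (added, not part of the original header; the kernel-side names
// are assumptions): a kernel's DoCompute() typically invokes the functor and
// turns a non-negative return value into an error, along the lines of
//
//   functor::ScatterFunctor<CPUDevice, T, Index, scatter_op::UpdateOp::ADD>
//       scatter;
//   const Index bad_i = scatter(context, context->eigen_device<CPUDevice>(),
//                               params_matrix, updates_matrix, indices_flat);
//   OP_REQUIRES(context, bad_i < 0,
//               errors::InvalidArgument("indices[", bad_i, "] out of range"));
//
// Only the "first out-of-range position, or -1 on success" return convention
// comes from this header.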
template <typename Device, typename Index>
struct ScatterFunctorVariantAssignBase {
  Index operator()(OpKernelContext* c, const Device& d,
                   typename TTypes<Variant>::Matrix params,
                   typename TTypes<Variant>::ConstMatrix updates,
                   typename TTypes<Index>::ConstFlat indices) {
    // indices and params sizes were validated in DoCompute().
    const Index N = static_cast<Index>(indices.size());
    const Index limit = static_cast<Index>(params.dimension(0));
    const Index cols = static_cast<Index>(params.dimension(1));
    DCHECK_EQ(N, updates.dimension(0));
    DCHECK_EQ(cols, updates.dimension(1));
    for (Index i = 0; i < N; i++) {
      // Grab the index and check its validity.  Do this carefully,
      // to avoid checking the value and grabbing it again from
      // memory a second time (a security risk since it may change in between).
      const Index index = ::tensorflow::internal::SubtleMustCopy(indices(i));
      if (!FastBoundsCheck(index, limit)) return i;
      // Copy last Ndim-1 dimensions of updates[i] to params[index]
      for (int j = 0; j < cols; ++j) {
        const Variant& to_scatter = updates(i, j);
        params(index, j) = to_scatter;
      }
    }
    return -1;
  }
};

template <typename Index>
struct ScatterFunctor<CPUDevice, Variant, Index, scatter_op::UpdateOp::ASSIGN>
    : ScatterFunctorVariantAssignBase<CPUDevice, Index> {};
template <typename Index>
struct ScatterFunctor<GPUDevice, Variant, Index, scatter_op::UpdateOp::ASSIGN>
    : ScatterFunctorVariantAssignBase<GPUDevice, Index> {};

#ifdef TENSORFLOW_USE_SYCL
template <typename T, typename Index, scatter_op::UpdateOp op>
struct ScatterFunctorBase<SYCLDevice, T, Index, op> {
  Index operator()(OpKernelContext* c, const SYCLDevice& d,
                   typename TTypes<T>::Matrix params,
                   typename TTypes<T>::ConstMatrix updates,
                   typename TTypes<Index>::ConstFlat indices) {
    // indices and params sizes were validated in DoCompute().
    const Index N = static_cast<Index>(indices.size());
    const Index limit = static_cast<Index>(params.dimension(0));
    for (Index i = 0; i < N; i++) {
      // Grab the index and check its validity.  Do this carefully,
      // to avoid checking the value and grabbing it again from
      // memory a second time (a security risk since it may change in between).
      const Index index = ::tensorflow::internal::SubtleMustCopy(indices(i));
      if (!FastBoundsCheck(index, limit)) return i;
      // Copy last Ndim-1 dimensions of updates[i] to params[index]
      scatter_op::internal::AssignSYCL<op>::Run(
          d, params.template chip<0>(index), updates.template chip<0>(i));
    }
    return -1;
  }
};
#endif  // TENSORFLOW_USE_SYCL

template <typename T, typename Index>
struct ScatterFunctorBase<CPUDevice, T, Index, scatter_op::UpdateOp::ASSIGN> {
  Index operator()(OpKernelContext* c, const CPUDevice& d,
                   typename TTypes<T>::Matrix params,
                   typename TTypes<T>::ConstMatrix updates,
                   typename TTypes<Index>::ConstFlat indices) {
    // indices and params sizes were validated in DoCompute().
    const Index N = static_cast<Index>(indices.size());
    const Index limit = static_cast<Index>(params.dimension(0));
    if (!std::is_same<T, string>::value) {
      for (Index i = 0; i < N; i++) {
        // Grab the index and check its validity.  Do this carefully,
        // to avoid checking the value and grabbing it again from
        // memory a second time (a security risk since it may change in
        // between).
        const Index index = ::tensorflow::internal::SubtleMustCopy(indices(i));
        if (!FastBoundsCheck(index, limit)) return i;
        memmove(params.data() + index * params.dimension(1),
                updates.data() + i * updates.dimension(1),
                updates.dimension(1) * sizeof(T));
      }
    } else {
      for (Index i = 0; i < N; i++) {
        // Grab the index and check its validity.  Do this carefully,
        // to avoid checking the value and grabbing it again from
        // memory a second time (a security risk since it may change in
        // between).
        const Index index = ::tensorflow::internal::SubtleMustCopy(indices(i));
        if (!FastBoundsCheck(index, limit)) return i;
        // Copy last Ndim-1 dimensions of updates[i] to params[index]
        scatter_op::internal::Assign<scatter_op::UpdateOp::ASSIGN>::Run(
            params.template chip<0>(index), updates.template chip<0>(i));
      }
    }
    return -1;
  }
};

template <typename T, typename Index, scatter_op::UpdateOp op>
struct ScatterFunctor<CPUDevice, T, Index, op>
    : ScatterFunctorBase<CPUDevice, T, Index, op> {};
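// Note (added commentary, not in the original header): the CPUDevice ASSIGN
// specialization above copies whole rows with memmove() whenever T is not
// `string`, presumably because those element types are bitwise-copyable;
// rows of `string` fall back to the element-wise Eigen chip assignment so
// that proper copy semantics run for each element.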
#ifdef TENSORFLOW_USE_SYCL
template <typename T, typename Index, scatter_op::UpdateOp op>
struct ScatterFunctorSYCL {
  Index operator()(OpKernelContext* c, const SYCLDevice& d,
                   typename TTypes<T>::Matrix params,
                   typename TTypes<T>::ConstMatrix updates,
                   typename TTypes<Index>::Flat indices) {
    // indices and params sizes were validated in DoCompute().
    const Index N = static_cast<Index>(indices.size());
    const Index limit = static_cast<Index>(params.dimension(0));
    for (Index i = 0; i < N; i++) {
      const Index index = ::tensorflow::internal::SubtleMustCopy(indices(i));
      if (!FastBoundsCheck(index, limit)) return i;
      // Copy last Ndim-1 dimensions of updates[i] to params[index]
      scatter_op::internal::AssignSYCL<op>::Run(
          d, params.template chip<0>(index), updates.template chip<0>(i));
    }
    return -1;
  }
};
#endif  // TENSORFLOW_USE_SYCL

template <typename Device, typename T, typename Index, scatter_op::UpdateOp op>
struct ScatterScalarFunctor {
  Index operator()(OpKernelContext* c, const Device& d,
                   typename TTypes<T>::Matrix params,
                   const typename TTypes<T>::ConstScalar update,
                   typename TTypes<Index>::ConstFlat indices);
};

template <typename Device, typename T, typename Index, scatter_op::UpdateOp op>
struct ScatterScalarFunctorBase {
  Index operator()(OpKernelContext* c, const Device& d,
                   typename TTypes<T>::Matrix params,
                   const typename TTypes<T>::ConstScalar update,
                   typename TTypes<Index>::ConstFlat indices) {
    // indices and params sizes were validated in DoCompute().
    const Index N = static_cast<Index>(indices.size());
    const Index limit = static_cast<Index>(params.dimension(0));
    for (Index i = 0; i < N; i++) {
      // Grab the index and check its validity.  Do this carefully,
      // to avoid checking the value and grabbing it again from
      // memory a second time (a security risk since it may change in between).
      const Index index = ::tensorflow::internal::SubtleMustCopy(indices(i));
      if (!FastBoundsCheck(index, limit)) return i;
      // Broadcast update to params[index]
      scatter_op::internal::Assign<op>::RunScalar(
          params.template chip<0>(index), update());
    }
    return -1;
  }
};

template <typename Device, typename Index>
struct ScatterScalarFunctorVariantAssignBase {
  Index operator()(OpKernelContext* c, const Device& d,
                   typename TTypes<Variant>::Matrix params,
                   const typename TTypes<Variant>::ConstScalar update,
                   typename TTypes<Index>::ConstFlat indices) {
    // indices and params sizes were validated in DoCompute().
    const Index N = static_cast<Index>(indices.size());
    const Index limit = static_cast<Index>(params.dimension(0));
    const Index cols = static_cast<Index>(params.dimension(1));
    const Variant& to_scatter = update();
    for (Index i = 0; i < N; i++) {
      // Grab the index and check its validity.  Do this carefully,
      // to avoid checking the value and grabbing it again from
      // memory a second time (a security risk since it may change in between).
      const Index index = ::tensorflow::internal::SubtleMustCopy(indices(i));
      if (!FastBoundsCheck(index, limit)) return i;
      // Broadcast update to params[index]
      for (Index j = 0; j < cols; ++j) {
        params(index, j) = to_scatter;
      }
    }
    return -1;
  }
};

template <typename Index>
struct ScatterScalarFunctor<CPUDevice, Variant, Index,
                            scatter_op::UpdateOp::ASSIGN>
    : ScatterScalarFunctorVariantAssignBase<CPUDevice, Index> {};
template <typename Index>
struct ScatterScalarFunctor<GPUDevice, Variant, Index,
                            scatter_op::UpdateOp::ASSIGN>
    : ScatterScalarFunctorVariantAssignBase<GPUDevice, Index> {};
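// Usage sketch (added; variable names are assumptions): the scalar variant
// broadcasts one update value into every selected row, with the same
// "first bad index or -1" return convention as ScatterFunctor, e.g. roughly
//
//   functor::ScatterScalarFunctor<CPUDevice, float, int32,
//                                 scatter_op::UpdateOp::ASSIGN> scatter;
//   const int32 bad_i = scatter(context, context->eigen_device<CPUDevice>(),
//                               params_matrix, update_scalar, indices_flat);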
#ifdef TENSORFLOW_USE_SYCL
template <typename T, typename Index, scatter_op::UpdateOp op>
struct ScatterScalarFunctorBase<SYCLDevice, T, Index, op> {
  Index operator()(OpKernelContext* c, const SYCLDevice& d,
                   typename TTypes<T>::Matrix params,
                   const typename TTypes<T>::ConstScalar update,
                   typename TTypes<Index>::ConstFlat indices) {
    // indices and params sizes were validated in DoCompute().
    const Index N = static_cast<Index>(indices.size());
    const Index limit = static_cast<Index>(params.dimension(0));
    for (Index i = 0; i < N; i++) {
      // Grab the index and check its validity.  Do this carefully,
      // to avoid checking the value and grabbing it again from
      // memory a second time (a security risk since it may change in between).
      const Index index = ::tensorflow::internal::SubtleMustCopy(indices(i));
      if (!FastBoundsCheck(index, limit)) return i;
      // Broadcast update to params[index]
      scatter_op::internal::AssignSYCL<op>::RunScalar(
          d, params.template chip<0>(index), update);
    }
    return -1;
  }
};
#endif  // TENSORFLOW_USE_SYCL

template <typename T, typename Index>
struct ScatterScalarFunctorBase<CPUDevice, T, Index,
                                scatter_op::UpdateOp::ASSIGN> {
  Index operator()(OpKernelContext* c, const CPUDevice& d,
                   typename TTypes<T>::Matrix params,
                   const typename TTypes<T>::ConstScalar update,
                   typename TTypes<Index>::ConstFlat indices) {
    // indices and params sizes were validated in DoCompute().
    const Index N = static_cast<Index>(indices.size());
    const Index limit = static_cast<Index>(params.dimension(0));
    for (Index i = 0; i < N; i++) {
      // Grab the index and check its validity.  Do this carefully,
      // to avoid checking the value and grabbing it again from
      // memory a second time (a security risk since it may change in between).
      const Index index = ::tensorflow::internal::SubtleMustCopy(indices(i));
      if (!FastBoundsCheck(index, limit)) return i;
      // Broadcast update to params[index]
      scatter_op::internal::Assign<scatter_op::UpdateOp::ASSIGN>::RunScalar(
          params.template chip<0>(index), update());
    }
    return -1;
  }
};

template <typename T, typename Index, scatter_op::UpdateOp op>
struct ScatterScalarFunctor<CPUDevice, T, Index, op>
    : ScatterScalarFunctorBase<CPUDevice, T, Index, op> {};

#ifdef TENSORFLOW_USE_SYCL
template <typename T, typename Index, scatter_op::UpdateOp op>
struct ScatterScalarFunctorSYCL {
  Index operator()(OpKernelContext* c, const SYCLDevice& d,
                   typename TTypes<T>::Matrix params,
                   const typename TTypes<T>::ConstScalar update,
                   typename TTypes<Index>::Flat indices) {
    // indices and params sizes were validated in DoCompute().
    const Index N = static_cast<Index>(indices.size());
    const Index limit = static_cast<Index>(params.dimension(0));
    for (Index i = 0; i < N; i++) {
      const Index index = ::tensorflow::internal::SubtleMustCopy(indices(i));
      if (!FastBoundsCheck(index, limit)) return i;
      // Broadcast update to params[index]
      scatter_op::internal::AssignSYCL<op>::Run(
          d, params.template chip<0>(index), update());
    }
    return -1;
  }
};
#endif  // TENSORFLOW_USE_SYCL

}  // namespace functor
}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_KERNELS_SCATTER_FUNCTOR_H_