/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_KERNELS_TRAINING_OPS_H_
#define TENSORFLOW_KERNELS_TRAINING_OPS_H_

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {
namespace functor {

// Each training algorithm has an ApplyXYZ functor struct declared in
// this header file. They are specialized for different devices
// (CPUDevice in training_ops.cc or GPUDevice in training_ops_gpu.cu.cc).

template <typename Device, typename T>
struct ApplyGradientDescent {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::ConstScalar alpha,
                  typename TTypes<T>::ConstFlat delta);
};

template <typename Device, typename T>
struct ApplyAdadelta {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::Flat accum,
                  typename TTypes<T>::Flat accum_update,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstScalar rho,
                  typename TTypes<T>::ConstScalar epsilon,
                  typename TTypes<T>::ConstFlat grad);
};

template <typename Device, typename T>
struct FobosElasticNet {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstScalar l1,
                  typename TTypes<T>::ConstScalar l2,
                  typename TTypes<T>::ConstFlat grad);
};

template <typename Device, typename T>
struct ApplyProximalGradientDescent {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstScalar l1,
                  typename TTypes<T>::ConstScalar l2,
                  typename TTypes<T>::ConstFlat grad);
};

template <typename Device, typename T>
struct ApplyAdagrad {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::Flat accum,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstFlat grad);
};

template <typename Device, typename T>
struct ApplyAdagradDA {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::Flat gradient_accum,
                  typename TTypes<T>::Flat gradient_squared_accum,
                  typename TTypes<T>::ConstScalar lr, int64 global_step,
                  typename TTypes<T>::ConstScalar l1,
                  typename TTypes<T>::ConstScalar l2,
                  typename TTypes<T>::ConstFlat grad);
};

template <typename Device, typename T>
struct ApplyProximalAdagrad {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::Flat accum,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstScalar l1,
                  typename TTypes<T>::ConstScalar l2,
                  typename TTypes<T>::ConstFlat grad);
};

template <typename Device, typename T>
struct ApplyFtrl {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::Flat accum,
                  typename TTypes<T>::Flat linear,
                  typename TTypes<T>::ConstFlat grad,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstScalar l1,
                  typename TTypes<T>::ConstScalar l2,
                  typename TTypes<T>::ConstScalar lr_power);
};

template <typename Device, typename T>
struct ApplyFtrlV2 {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::Flat accum,
                  typename TTypes<T>::Flat linear,
                  typename TTypes<T>::ConstFlat grad,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstScalar l1,
                  typename TTypes<T>::ConstScalar l2,
                  typename TTypes<T>::ConstScalar l2_shrinkage,
                  typename TTypes<T>::ConstScalar lr_power);
};
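// Illustrative sketch (not part of the original header): the device
// specializations mentioned above are defined in training_ops.cc and
// training_ops_gpu.cu.cc by evaluating an Eigen expression on the device
// `d`. Assuming CPUDevice is the usual alias for Eigen::ThreadPoolDevice,
// the CPU specialization of ApplyGradientDescent looks roughly like this
// (the exact body in training_ops.cc may differ):
//
//   template <typename T>
//   struct ApplyGradientDescent<CPUDevice, T> {
//     void operator()(const CPUDevice& d, typename TTypes<T>::Flat var,
//                     typename TTypes<T>::ConstScalar alpha,
//                     typename TTypes<T>::ConstFlat delta) {
//       // var <- var - alpha * delta; alpha() reads the rank-0 scalar.
//       var.device(d) -= delta * alpha();
//     }
//   };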
template <typename Device, typename T>
struct ApplyMomentum {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::Flat accum,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstFlat grad,
                  typename TTypes<T>::ConstScalar momentum, bool use_nesterov);
};

template <typename Device, typename T>
struct ApplyAdam {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::Flat m, typename TTypes<T>::Flat v,
                  typename TTypes<T>::ConstScalar beta1_power,
                  typename TTypes<T>::ConstScalar beta2_power,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstScalar beta1,
                  typename TTypes<T>::ConstScalar beta2,
                  typename TTypes<T>::ConstScalar epsilon,
                  typename TTypes<T>::ConstFlat grad, bool use_nesterov);
};

template <typename Device, typename T>
struct ApplyAdaMax {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::Flat m, typename TTypes<T>::Flat v,
                  typename TTypes<T>::ConstScalar beta1_power,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstScalar beta1,
                  typename TTypes<T>::ConstScalar beta2,
                  typename TTypes<T>::ConstScalar epsilon,
                  typename TTypes<T>::ConstFlat grad);
};

template <typename Device, typename T>
struct ApplyRMSProp {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::Flat ms, typename TTypes<T>::Flat mom,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstScalar rho,
                  typename TTypes<T>::ConstScalar momentum,
                  typename TTypes<T>::ConstScalar epsilon,
                  typename TTypes<T>::ConstFlat grad);
};

template <typename Device, typename T>
struct ApplyCenteredRMSProp {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::Flat mg, typename TTypes<T>::Flat ms,
                  typename TTypes<T>::Flat mom,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstScalar rho,
                  typename TTypes<T>::ConstScalar momentum,
                  typename TTypes<T>::ConstScalar epsilon,
                  typename TTypes<T>::ConstFlat grad);
};

template <typename Device, typename T>
struct ApplyAddSign {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::Flat m,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstScalar alpha,
                  typename TTypes<T>::ConstScalar sign_decay,
                  typename TTypes<T>::ConstScalar beta,
                  typename TTypes<T>::ConstFlat grad);
};

template <typename Device, typename T>
struct ApplyPowerSign {
  void operator()(const Device& d, typename TTypes<T>::Flat var,
                  typename TTypes<T>::Flat m,
                  typename TTypes<T>::ConstScalar lr,
                  typename TTypes<T>::ConstScalar logbase,
                  typename TTypes<T>::ConstScalar sign_decay,
                  typename TTypes<T>::ConstScalar beta,
                  typename TTypes<T>::ConstFlat grad);
};

}  // end namespace functor
}  // end namespace tensorflow

#endif  // TENSORFLOW_KERNELS_TRAINING_OPS_H_
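// Usage sketch (illustrative; the variable names below are hypothetical):
// an op kernel's Compute() obtains the op's Eigen device from its
// OpKernelContext and invokes the functor with flattened tensor views, e.g.
//
//   const Device& device = context->eigen_device<Device>();
//   functor::ApplyGradientDescent<Device, T>()(
//       device, var.flat<T>(), alpha.scalar<T>(), delta.flat<T>());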