/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// See docs in ../ops/nn_ops.cc.

#define EIGEN_USE_THREADS

#include "tensorflow/core/kernels/relu_op.h"

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"

namespace tensorflow {

typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
#ifdef TENSORFLOW_USE_SYCL
typedef Eigen::SyclDevice SYCLDevice;
#endif  // TENSORFLOW_USE_SYCL

#define REGISTER_RELU_KERNELS(type)                                       \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("Relu").Device(DEVICE_CPU).TypeConstraint<type>("T"),          \
      ReluOp<CPUDevice, type>);                                           \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("ReluGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"),      \
      ReluGradOp<CPUDevice, type>);                                       \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("Relu6").Device(DEVICE_CPU).TypeConstraint<type>("T"),         \
      Relu6Op<CPUDevice, type>);                                          \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("Relu6Grad").Device(DEVICE_CPU).TypeConstraint<type>("T"),     \
      Relu6GradOp<CPUDevice, type>);                                      \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("LeakyRelu").Device(DEVICE_CPU).TypeConstraint<type>("T"),     \
      LeakyReluOp<CPUDevice, type>);                                      \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("LeakyReluGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
      LeakyReluGradOp<CPUDevice, type>);

TF_CALL_REAL_NUMBER_TYPES(REGISTER_RELU_KERNELS);
#undef REGISTER_RELU_KERNELS

#define REGISTER_ELU_KERNELS(type)                                   \
  REGISTER_KERNEL_BUILDER(                                           \
      Name("Elu").Device(DEVICE_CPU).TypeConstraint<type>("T"),      \
      EluOp<CPUDevice, type>);                                       \
  REGISTER_KERNEL_BUILDER(                                           \
      Name("EluGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"),  \
      EluGradOp<CPUDevice, type>);                                   \
  REGISTER_KERNEL_BUILDER(                                           \
      Name("Selu").Device(DEVICE_CPU).TypeConstraint<type>("T"),     \
      SeluOp<CPUDevice, type>);                                      \
  REGISTER_KERNEL_BUILDER(                                           \
      Name("SeluGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
      SeluGradOp<CPUDevice, type>)

// Elu and Selu only make sense with float or double.
TF_CALL_GPU_NUMBER_TYPES(REGISTER_ELU_KERNELS);
#undef REGISTER_ELU_KERNELS
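
// Illustration only (not an additional registration): for type = float, the
// REGISTER_RELU_KERNELS macro above expands to kernel registrations of the
// form
//
//   REGISTER_KERNEL_BUILDER(
//       Name("Relu").Device(DEVICE_CPU).TypeConstraint<float>("T"),
//       ReluOp<CPUDevice, float>);
//
// and likewise for ReluGrad, Relu6, Relu6Grad, LeakyRelu and LeakyReluGrad;
// REGISTER_ELU_KERNELS expands the same way for Elu, EluGrad, Selu and
// SeluGrad.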
#if GOOGLE_CUDA
// Forward declarations of the functor specializations for GPU.
namespace functor {
#define DECLARE_GPU_SPEC(T)                                                   \
  template <>                                                                 \
  void Relu<GPUDevice, T>::operator()(                                        \
      const GPUDevice& d, typename TTypes<T>::ConstTensor features,          \
      typename TTypes<T>::Tensor activations);                               \
  extern template struct Relu<GPUDevice, T>;                                  \
                                                                              \
  template <>                                                                 \
  void ReluGrad<GPUDevice, T>::operator()(                                    \
      const GPUDevice& d, typename TTypes<T>::ConstTensor gradients,         \
      typename TTypes<T>::ConstTensor features,                              \
      typename TTypes<T>::Tensor backprops);                                 \
  extern template struct ReluGrad<GPUDevice, T>;                              \
                                                                              \
  template <>                                                                 \
  void Relu6<GPUDevice, T>::operator()(                                       \
      const GPUDevice& d, typename TTypes<T>::ConstTensor features,          \
      typename TTypes<T>::Tensor activations);                               \
  extern template struct Relu6<GPUDevice, T>;                                 \
                                                                              \
  template <>                                                                 \
  void Relu6Grad<GPUDevice, T>::operator()(                                   \
      const GPUDevice& d, typename TTypes<T>::ConstTensor gradients,         \
      typename TTypes<T>::ConstTensor features,                              \
      typename TTypes<T>::Tensor backprops);                                 \
  extern template struct Relu6Grad<GPUDevice, T>;                             \
                                                                              \
  template <>                                                                 \
  void LeakyRelu<GPUDevice, T>::operator()(                                   \
      const GPUDevice& d, typename TTypes<T>::ConstTensor features, T alpha, \
      typename TTypes<T>::Tensor activations);                               \
  extern template struct LeakyRelu<GPUDevice, T>;                             \
                                                                              \
  template <>                                                                 \
  void LeakyReluGrad<GPUDevice, T>::operator()(                               \
      const GPUDevice& d, typename TTypes<T>::ConstTensor gradients,         \
      typename TTypes<T>::ConstTensor features, T alpha,                     \
      typename TTypes<T>::Tensor backprops);                                 \
  extern template struct LeakyReluGrad<GPUDevice, T>;                         \
                                                                              \
  template <>                                                                 \
  void Elu<GPUDevice, T>::operator()(const GPUDevice& d,                      \
                                     typename TTypes<T>::ConstTensor features, \
                                     typename TTypes<T>::Tensor activations); \
  extern template struct Elu<GPUDevice, T>;                                   \
                                                                              \
  template <>                                                                 \
  void EluGrad<GPUDevice, T>::operator()(                                     \
      const GPUDevice& d, typename TTypes<T>::ConstTensor gradients,         \
      typename TTypes<T>::ConstTensor activations,                           \
      typename TTypes<T>::Tensor backprops);                                 \
  extern template struct EluGrad<GPUDevice, T>;                               \
                                                                              \
  template <>                                                                 \
  void Selu<GPUDevice, T>::operator()(                                        \
      const GPUDevice& d, typename TTypes<T>::ConstTensor features,          \
      typename TTypes<T>::Tensor activations);                               \
  extern template struct Selu<GPUDevice, T>;                                  \
                                                                              \
  template <>                                                                 \
  void SeluGrad<GPUDevice, T>::operator()(                                    \
      const GPUDevice& d, typename TTypes<T>::ConstTensor gradients,         \
      typename TTypes<T>::ConstTensor activations,                           \
      typename TTypes<T>::Tensor backprops);                                 \
  extern template struct SeluGrad<GPUDevice, T>;

template <>
void Relu<GPUDevice, qint8>::operator()(
    const GPUDevice& d, typename TTypes<qint8>::ConstTensor features,
    typename TTypes<qint8>::Tensor activations);
extern template struct Relu<GPUDevice, qint8>;

TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPEC);
}  // namespace functor
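
// Note: the declarations above assume that the matching explicit
// instantiations (e.g. "template struct functor::Relu<GPUDevice, float>;")
// are provided by the CUDA build of these functors, typically in this
// kernel's *_gpu.cu.cc translation unit; the "extern template" lines only
// prevent re-instantiation in this file.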
// Registration of the GPU implementations.
#define REGISTER_GPU_KERNELS(type)                                        \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("Relu").Device(DEVICE_GPU).TypeConstraint<type>("T"),          \
      ReluOp<GPUDevice, type>);                                           \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("ReluGrad").Device(DEVICE_GPU).TypeConstraint<type>("T"),      \
      ReluGradOp<GPUDevice, type>);                                       \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("Relu6").Device(DEVICE_GPU).TypeConstraint<type>("T"),         \
      Relu6Op<GPUDevice, type>);                                          \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("Relu6Grad").Device(DEVICE_GPU).TypeConstraint<type>("T"),     \
      Relu6GradOp<GPUDevice, type>);                                      \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("LeakyRelu").Device(DEVICE_GPU).TypeConstraint<type>("T"),     \
      LeakyReluOp<GPUDevice, type>);                                      \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("LeakyReluGrad").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
      LeakyReluGradOp<GPUDevice, type>);                                  \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("Elu").Device(DEVICE_GPU).TypeConstraint<type>("T"),           \
      EluOp<GPUDevice, type>);                                            \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("EluGrad").Device(DEVICE_GPU).TypeConstraint<type>("T"),       \
      EluGradOp<GPUDevice, type>);                                        \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("Selu").Device(DEVICE_GPU).TypeConstraint<type>("T"),          \
      SeluOp<GPUDevice, type>);                                           \
  REGISTER_KERNEL_BUILDER(                                                \
      Name("SeluGrad").Device(DEVICE_GPU).TypeConstraint<type>("T"),      \
      SeluGradOp<GPUDevice, type>)

TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_KERNELS);
#undef REGISTER_GPU_KERNELS

// Specialization of ReluOp for quantized int8 on GPU. The functor consumes
// the input four int8 values at a time, so the flattened tensor size must be
// a multiple of 4.
template <typename Device>
class ReluOp<Device, qint8>
    : public UnaryElementWiseOp<qint8, ReluOp<Device, qint8>> {
 public:
  using UnaryElementWiseOp<qint8, ReluOp<Device, qint8>>::UnaryElementWiseOp;

  void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) {
    auto flat_input = input.flat<qint8>();
    OP_REQUIRES(context, (flat_input.size() % 4) == 0,
                errors::InvalidArgument(
                    "Tensor size must be a multiple of 4 for Relu<qint8>. Got ",
                    flat_input.size()));

    functor::Relu<Device, qint8> func;
    func(context->eigen_device<Device>(), flat_input, output->flat<qint8>());
  }
};

REGISTER_KERNEL_BUILDER(
    Name("Relu").Device(DEVICE_GPU).TypeConstraint<qint8>("T"),
    ReluOp<GPUDevice, qint8>);

#endif  // GOOGLE_CUDA

#ifdef TENSORFLOW_USE_SYCL
// Registration of the SYCL implementations.
#define REGISTER_SYCL_KERNELS(type)                                        \
  REGISTER_KERNEL_BUILDER(                                                 \
      Name("Relu").Device(DEVICE_SYCL).TypeConstraint<type>("T"),          \
      ReluOp<SYCLDevice, type>);                                           \
  REGISTER_KERNEL_BUILDER(                                                 \
      Name("ReluGrad").Device(DEVICE_SYCL).TypeConstraint<type>("T"),      \
      ReluGradOp<SYCLDevice, type>);                                       \
  REGISTER_KERNEL_BUILDER(                                                 \
      Name("Relu6").Device(DEVICE_SYCL).TypeConstraint<type>("T"),         \
      Relu6Op<SYCLDevice, type>);                                          \
  REGISTER_KERNEL_BUILDER(                                                 \
      Name("Relu6Grad").Device(DEVICE_SYCL).TypeConstraint<type>("T"),     \
      Relu6GradOp<SYCLDevice, type>);                                      \
  REGISTER_KERNEL_BUILDER(                                                 \
      Name("LeakyRelu").Device(DEVICE_SYCL).TypeConstraint<type>("T"),     \
      LeakyReluOp<SYCLDevice, type>);                                      \
  REGISTER_KERNEL_BUILDER(                                                 \
      Name("LeakyReluGrad").Device(DEVICE_SYCL).TypeConstraint<type>("T"), \
      LeakyReluGradOp<SYCLDevice, type>);                                  \
  REGISTER_KERNEL_BUILDER(                                                 \
      Name("Elu").Device(DEVICE_SYCL).TypeConstraint<type>("T"),           \
      EluOp<SYCLDevice, type>);                                            \
  REGISTER_KERNEL_BUILDER(                                                 \
      Name("EluGrad").Device(DEVICE_SYCL).TypeConstraint<type>("T"),       \
      EluGradOp<SYCLDevice, type>);                                        \
  REGISTER_KERNEL_BUILDER(                                                 \
      Name("Selu").Device(DEVICE_SYCL).TypeConstraint<type>("T"),          \
      SeluOp<SYCLDevice, type>);                                           \
  REGISTER_KERNEL_BUILDER(                                                 \
      Name("SeluGrad").Device(DEVICE_SYCL).TypeConstraint<type>("T"),      \
      SeluGradOp<SYCLDevice, type>)

TF_CALL_GPU_NUMBER_TYPES_NO_HALF(REGISTER_SYCL_KERNELS);
#undef REGISTER_SYCL_KERNELS
#endif  // TENSORFLOW_USE_SYCL

}  // namespace tensorflow