author    Cao Zongyan <zongyan.cao@alibaba-inc.com>  2018-08-16 19:04:37 +0800
committer Cao Zongyan <zongyan.cao@alibaba-inc.com>  2018-08-16 22:04:45 +0800
commit    aa25cc078c9b55e5ca3e0f59df43e169bfee8f3c (patch)
tree      7348b2db46ab464be34cc385d2509f7260727bbd /tensorflow/cc
parent    938b9a40787028c58fb548fa6ada8c0dd8180f35 (diff)
Add LeakyRelu C++ Op and its gradient implementation.
LeakyRelu, defined as 'y = { x (x>=0) or alpha*x (x<0) }', was previously computed with the composite expression 'max(x, alpha*x)' in the current code, so its gradient calculation for back propagation involved a series of element-wise Ops. That is unnecessary for such a simple op; it can be done in a single Op with fewer memory accesses.
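For reference, a minimal element-wise sketch of the forward function and of the gradient that the new single Op computes, written in plain C++ purely for illustration (this is not the TensorFlow kernel code in the diff below):

// LeakyRelu forward: y = x for x >= 0, alpha * x otherwise.
float LeakyRelu(float x, float alpha) {
  return x >= 0.0f ? x : alpha * x;
}

// LeakyRelu gradient: dL/dx = dy for x >= 0, alpha * dy otherwise.
// A single element-wise pass, instead of differentiating max(x, alpha*x).
float LeakyReluGrad(float dy, float x, float alpha) {
  return x >= 0.0f ? dy : alpha * dy;
}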
Diffstat (limited to 'tensorflow/cc')
-rw-r--r--  tensorflow/cc/gradients/nn_grad.cc       13
-rw-r--r--  tensorflow/cc/gradients/nn_grad_test.cc  13
2 files changed, 26 insertions(+), 0 deletions(-)
diff --git a/tensorflow/cc/gradients/nn_grad.cc b/tensorflow/cc/gradients/nn_grad.cc
index 588e96cb19..0fc23d0bf7 100644
--- a/tensorflow/cc/gradients/nn_grad.cc
+++ b/tensorflow/cc/gradients/nn_grad.cc
@@ -143,6 +143,19 @@ Status Relu6GradHelper(const Scope& scope, const Operation& op,
}
REGISTER_GRADIENT_OP("Relu6", Relu6GradHelper);
+Status LeakyReluGradHelper(const Scope& scope, const Operation& op,
+ const std::vector<Output>& grad_inputs,
+ std::vector<Output>* grad_outputs) {
+ float alpha;
+ TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "alpha", &alpha));
+ internal::LeakyReluGrad::Attrs attrs;
+ attrs.Alpha(alpha);
+ auto dx = internal::LeakyReluGrad(scope, grad_inputs[0], op.input(0), attrs);
+ grad_outputs->push_back(dx);
+ return scope.status();
+}
+REGISTER_GRADIENT_OP("LeakyRelu", LeakyReluGradHelper);
+
Status EluGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
diff --git a/tensorflow/cc/gradients/nn_grad_test.cc b/tensorflow/cc/gradients/nn_grad_test.cc
index aa72cf7ba2..5ebece7b6e 100644
--- a/tensorflow/cc/gradients/nn_grad_test.cc
+++ b/tensorflow/cc/gradients/nn_grad_test.cc
@@ -41,6 +41,7 @@ using ops::MaxPoolV2;
using ops::Placeholder;
using ops::Relu;
using ops::Relu6;
+using ops::LeakyRelu;
using ops::Selu;
using ops::Softmax;
using ops::Softplus;
@@ -160,6 +161,18 @@ TEST_F(NNGradTest, Relu6Grad) {
RunTest(x, x_init_value, y, shape);
}
+TEST_F(NNGradTest, LeakyReluGrad) {
+ TensorShape shape({5, 2});
+ auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
+ auto y = LeakyRelu(scope_, x);
+ // Avoid input values where Leaky ReLU gradient is not well defined (around
+ // zero).
+ Tensor x_init_value = test::AsTensor<float>(
+ {-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
+ {5, 2});
+ RunTest(x, x_init_value, y, shape);
+}
+
TEST_F(NNGradTest, EluGrad) {
TensorShape shape({5, 2});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));