author    Cao Zongyan <zongyan.cao@alibaba-inc.com>    2018-09-03 11:48:35 +0800
committer Cao Zongyan <zongyan.cao@alibaba-inc.com>    2018-09-03 15:12:10 +0800
commit    2586eb3bfeeef3af357e438ae5aff92d2bac12a5 (patch)
tree      4b486481d06f9ef506955639bf60110d0c88a2ad
parent    4e72dd865a3fc83baa69f6b7c08720a1b546a464 (diff)
Code fixes for errors reported by ci_build.
-rw-r--r--  tensorflow/cc/gradients/nn_grad_test.cc           3
-rw-r--r--  tensorflow/core/kernels/relu_op.cc                8
-rw-r--r--  tensorflow/core/kernels/relu_op.h                 8
-rw-r--r--  tensorflow/core/kernels/relu_op_functor.h         1
-rw-r--r--  tensorflow/python/kernel_tests/relu_op_test.py   50
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.pbtxt   4
6 files changed, 39 insertions, 35 deletions
diff --git a/tensorflow/cc/gradients/nn_grad_test.cc b/tensorflow/cc/gradients/nn_grad_test.cc
index bf0db1f59d..d8c2a1a0fc 100644
--- a/tensorflow/cc/gradients/nn_grad_test.cc
+++ b/tensorflow/cc/gradients/nn_grad_test.cc
@@ -180,8 +180,7 @@ TEST_F(NNGradTest, LeakyReluGradGrad) {
// Avoid input values where Leaky ReLU gradient is not well defined (around
// zero).
Tensor x_init_value = test::AsTensor<float>(
- {2.3f, 1.9f, 1.5f, 1.1f, 0.7f, 0.3f, -0.1f, -0.5f, -0.9f, -1.3f},
- {5, 2});
+ {2.3f, 1.9f, 1.5f, 1.1f, 0.7f, 0.3f, -0.1f, -0.5f, -0.9f, -1.3f}, {5, 2});
Tensor features = test::AsTensor<float>(
{-0.9f, -0.7f, -0.5f, -0.3f, -0.1f, 0.1f, 0.3f, 0.5f, 0.7f, 0.9f},
{5, 2});
diff --git a/tensorflow/core/kernels/relu_op.cc b/tensorflow/core/kernels/relu_op.cc
index c4f2ef5632..cafa49cbb6 100644
--- a/tensorflow/core/kernels/relu_op.cc
+++ b/tensorflow/core/kernels/relu_op.cc
@@ -106,15 +106,15 @@ namespace functor {
\
template <> \
void LeakyRelu<GPUDevice, T>::operator()( \
- const GPUDevice& d, typename TTypes<T>::ConstTensor features, \
- T alpha, typename TTypes<T>::Tensor activations); \
+ const GPUDevice& d, typename TTypes<T>::ConstTensor features, T alpha, \
+ typename TTypes<T>::Tensor activations); \
extern template struct LeakyRelu<GPUDevice, T>; \
\
template <> \
void LeakyReluGrad<GPUDevice, T>::operator()( \
const GPUDevice& d, typename TTypes<T>::ConstTensor gradients, \
- typename TTypes<T>::ConstTensor features, \
- T alpha, typename TTypes<T>::Tensor backprops); \
+ typename TTypes<T>::ConstTensor features, T alpha, \
+ typename TTypes<T>::Tensor backprops); \
extern template struct LeakyReluGrad<GPUDevice, T>; \
\
template <> \
diff --git a/tensorflow/core/kernels/relu_op.h b/tensorflow/core/kernels/relu_op.h
index c55190065c..fa79ab03ae 100644
--- a/tensorflow/core/kernels/relu_op.h
+++ b/tensorflow/core/kernels/relu_op.h
@@ -143,8 +143,8 @@ class LeakyReluOp : public UnaryElementWiseOp<T, LeakyReluOp<Device, T>> {
   void Operate(OpKernelContext* context, const Tensor& input, Tensor* output) {
     functor::LeakyRelu<Device, T> functor;
-    functor(context->eigen_device<Device>(), input.flat<T>(),
-            alpha_, output->flat<T>());
+    functor(context->eigen_device<Device>(), input.flat<T>(), alpha_,
+            output->flat<T>());
   }

  private:
@@ -183,7 +183,9 @@ class LeakyReluGradOp
 template <typename Device, typename T>
 void LeakyReluGradOp<Device, T>::OperateNoTemplate(OpKernelContext* context,
-    const Tensor& g, const Tensor& a, T alpha, Tensor* output) {
+                                                   const Tensor& g,
+                                                   const Tensor& a, T alpha,
+                                                   Tensor* output) {
   if (!ReluHelpers::ValidateSameSize(context, g, a)) return;
   functor::LeakyReluGrad<Device, T> functor;
   functor(context->eigen_device<Device>(), g.flat<T>(), a.flat<T>(), alpha,
diff --git a/tensorflow/core/kernels/relu_op_functor.h b/tensorflow/core/kernels/relu_op_functor.h
index 7f0951451d..548d5a277d 100644
--- a/tensorflow/core/kernels/relu_op_functor.h
+++ b/tensorflow/core/kernels/relu_op_functor.h
@@ -91,7 +91,6 @@ struct Relu6Grad {
}
};
-
// Functor used by LeakyReluOp to do the computations.
template <typename Device, typename T>
struct LeakyRelu {
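
The excerpt above stops at the struct's opening brace. For orientation, the elementwise computation that the LeakyRelu and LeakyReluGrad functors implement comes down to the following sketch (illustrative NumPy with hypothetical helper names, not the Eigen code that actually lives in relu_op_functor.h):

    import numpy as np

    def leaky_relu(features, alpha=0.2):
        # Forward: identity for positive inputs, scale negatives by alpha.
        return np.where(features > 0, features, alpha * features)

    def leaky_relu_grad(gradients, features, alpha=0.2):
        # Backward: pass gradients through where the input was positive,
        # scale them by alpha where it was negative.
        return np.where(features > 0, gradients, alpha * gradients)

    x = np.array([-1.0, 0.5])
    print(leaky_relu(x, alpha=0.1))                   # [-0.1  0.5]
    print(leaky_relu_grad(np.ones(2), x, alpha=0.1))  # [0.1 1. ]
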
diff --git a/tensorflow/python/kernel_tests/relu_op_test.py b/tensorflow/python/kernel_tests/relu_op_test.py
index 7066f28883..3e24b8a2c4 100644
--- a/tensorflow/python/kernel_tests/relu_op_test.py
+++ b/tensorflow/python/kernel_tests/relu_op_test.py
@@ -323,37 +323,37 @@ class LeakyReluTest(test.TestCase):
   def testGradGradFloat32(self):
     with compat.forward_compatibility_horizon(2018, 10, 2):
       with self.test_session():
-      x = constant_op.constant(
-          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
-          shape=[2, 5],
-          name="x")
-      y = nn_ops.leaky_relu(x, alpha=0.1, name="leaky_relu")
-      z = gradients_impl.gradients(y, x)
-      x_init = np.asarray(
-          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
-          dtype=np.float32,
-          order="F")
-      err = gradient_checker.compute_gradient_error(
-          x, [2, 5], z[0], [2, 5], x_init_value=x_init)
+        x = constant_op.constant(
+            [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
+            shape=[2, 5],
+            name="x")
+        y = nn_ops.leaky_relu(x, alpha=0.1, name="leaky_relu")
+        z = gradients_impl.gradients(y, x)
+        x_init = np.asarray(
+            [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
+            dtype=np.float32,
+            order="F")
+        err = gradient_checker.compute_gradient_error(
+            x, [2, 5], z[0], [2, 5], x_init_value=x_init)
       print("leaky_relu (float32) gradient of gradient err = ", err)
       self.assertLess(err, 1e-4)

   def testGradGradFloat64(self):
     with compat.forward_compatibility_horizon(2018, 10, 2):
       with self.test_session():
-      x = constant_op.constant(
-          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
-          shape=[2, 5],
-          dtype=dtypes.float64,
-          name="x")
-      y = nn_ops.leaky_relu(x, alpha=0.02, name="leaky_relu")
-      z = gradients_impl.gradients(y, x)
-      x_init = np.asarray(
-          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
-          dtype=np.float64,
-          order="F")
-      err = gradient_checker.compute_gradient_error(
-          x, [2, 5], z[0], [2, 5], x_init_value=x_init)
+        x = constant_op.constant(
+            [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
+            shape=[2, 5],
+            dtype=dtypes.float64,
+            name="x")
+        y = nn_ops.leaky_relu(x, alpha=0.02, name="leaky_relu")
+        z = gradients_impl.gradients(y, x)
+        x_init = np.asarray(
+            [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
+            dtype=np.float64,
+            order="F")
+        err = gradient_checker.compute_gradient_error(
+            x, [2, 5], z[0], [2, 5], x_init_value=x_init)
       print("leaky_relu (float64) gradient of gradient err = ", err)
       self.assertLess(err, 1e-10)
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.pbtxt
index 4de662fe33..9e8d320f06 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.pbtxt
@@ -1325,6 +1325,10 @@ tf_module {
argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "leaky_relu"
+ argspec: "args=[\'features\', \'alpha\', \'name\'], varargs=None, keywords=None, defaults=[\'0.2\', \'None\'], "
+ }
+ member_method {
name: "less"
argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}