From 043b574402c58e1cf629242b3faad3ec071e5ce4 Mon Sep 17 00:00:00 2001
From: "Yan Facai (颜发才)"
Date: Sat, 5 May 2018 17:16:04 +0800
Subject: ENH: add gradient function

---
 tensorflow/cc/gradients/math_grad.cc      | 15 +++++++++++++++
 tensorflow/cc/gradients/math_grad_test.cc | 10 ++++++++++
 2 files changed, 25 insertions(+)

(limited to 'tensorflow/cc')

diff --git a/tensorflow/cc/gradients/math_grad.cc b/tensorflow/cc/gradients/math_grad.cc
index 52c177212a..ea86fc0a7c 100644
--- a/tensorflow/cc/gradients/math_grad.cc
+++ b/tensorflow/cc/gradients/math_grad.cc
@@ -440,6 +440,21 @@ Status RealDivGrad(const Scope& scope, const Operation& op,
 }
 REGISTER_GRADIENT_OP("RealDiv", RealDivGrad);

+Status UnsafeDivGrad(const Scope& scope, const Operation& op,
+                     const std::vector<Output>& grad_inputs,
+                     std::vector<Output>* grad_outputs) {
+  auto x_1 = ConjugateHelper(scope, op.input(0));
+  auto x_2 = ConjugateHelper(scope, op.input(1));
+  // y = x_1 / x_2
+  // dy/dx_1 = 1/x_2
+  // dy/dx_2 = -x_1/x_2^2
+  auto gx_1 = UnsafeDiv(scope, grad_inputs[0], x_2);
+  auto gx_2 = Mul(scope, grad_inputs[0],
+                  UnsafeDiv(scope, UnsafeDiv(scope, Neg(scope, x_1), x_2), x_2));
+  return BinaryGradCommon(scope, op, grad_outputs, gx_1, gx_2);
+}
+REGISTER_GRADIENT_OP("UnsafeDiv", DivGrad);
+
 Status SquaredDifferenceGrad(const Scope& scope, const Operation& op,
                              const std::vector<Output>& grad_inputs,
                              std::vector<Output>* grad_outputs) {
diff --git a/tensorflow/cc/gradients/math_grad_test.cc b/tensorflow/cc/gradients/math_grad_test.cc
index 1b4c7c2688..0cc398abcf 100644
--- a/tensorflow/cc/gradients/math_grad_test.cc
+++ b/tensorflow/cc/gradients/math_grad_test.cc
@@ -46,6 +46,7 @@ using ops::RealDiv;
 using ops::SquaredDifference;
 using ops::Sub;
 using ops::Sum;
+using ops::UnsafeDiv;
 using ops::Where3;

 // TODO(andydavis) Test gradient function against numeric gradients output.
@@ -856,6 +857,15 @@ TEST_F(NaryGradTest, RealDiv) {
   RunTest({x}, {x_shape}, {y}, {x_shape});
 }

+TEST_F(NaryGradTest, UnsafeDiv) {
+  TensorShape x_shape({3, 2, 5});
+  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
+  // Test x / (1 + |x|) rather than x_1 / x_2 to avoid triggering large
+  // division errors in the numeric estimator used by the gradient checker.
+  auto y = UnsafeDiv(scope_, x, Add(scope_, Const(scope_, 1), Abs(scope_, x)));
+  RunTest({x}, {x_shape}, {y}, {x_shape});
+}
+
 TEST_F(NaryGradTest, SquaredDifference) {
   TensorShape x1_shape({3, 2, 5});
   TensorShape x2_shape({2, 5});
--
cgit v1.2.3

From c8ebbd090d92ef6a49fa9f0f5a06952333beb2c8 Mon Sep 17 00:00:00 2001
From: "Yan Facai (颜发才)"
Date: Sat, 5 May 2018 19:37:32 +0800
Subject: CLN: fix minor error

---
 tensorflow/cc/gradients/math_grad.cc    | 2 +-
 tensorflow/core/ops/math_grad_test.cc   | 4 ++--
 tensorflow/python/ops/math_grad_test.py | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'tensorflow/cc')

diff --git a/tensorflow/cc/gradients/math_grad.cc b/tensorflow/cc/gradients/math_grad.cc
index ea86fc0a7c..a8909846c9 100644
--- a/tensorflow/cc/gradients/math_grad.cc
+++ b/tensorflow/cc/gradients/math_grad.cc
@@ -453,7 +453,7 @@ Status UnsafeDivGrad(const Scope& scope, const Operation& op,
                   UnsafeDiv(scope, UnsafeDiv(scope, Neg(scope, x_1), x_2), x_2));
   return BinaryGradCommon(scope, op, grad_outputs, gx_1, gx_2);
 }
-REGISTER_GRADIENT_OP("UnsafeDiv", DivGrad);
+REGISTER_GRADIENT_OP("UnsafeDiv", UnsafeDivGrad);

 Status SquaredDifferenceGrad(const Scope& scope, const Operation& op,
                              const std::vector<Output>& grad_inputs,
diff --git a/tensorflow/core/ops/math_grad_test.cc b/tensorflow/core/ops/math_grad_test.cc
index c40b1bf198..6d60bcd77c 100644
--- a/tensorflow/core/ops/math_grad_test.cc
+++ b/tensorflow/core/ops/math_grad_test.cc
@@ -746,7 +746,7 @@ TEST_F(MathGradTest, UnsafeDiv) {
     }
   };
   test::ExpectClose(dx, test::AsTensor<float>(
-                            {g(0.f, -10.f), g(-3.f, -10.f), g(-2.f, -0.f),
+                            {g(0.f, -10.f), g(-3.f, -10.f), g(-2.f, -10.f),
                              g(-1.f, 0.f), g(0.f, 0.f), g(1.f, 0.f),
                              g(2.f, 10.f), g(3.f, 10.f), g(0.f, 10.f)},
                            TensorShape({3, 3})));
@@ -762,7 +762,7 @@ TEST_F(MathGradTest, UnsafeDiv) {
   };
   test::ExpectClose(dy,
                     test::AsTensor<float>(
-                        {g(0.f, -10.f) + g(-3.f, -10.f) + g(-2.f, -0.f),
+                        {g(0.f, -10.f) + g(-3.f, -10.f) + g(-2.f, -10.f),
                          g(-1.f, 0.f) + g(0.f, 0.f) + g(1.f, 0.f),
                          g(2.f, 10.f) + g(3.f, 10.f) + g(0.f, 10.f)},
                         TensorShape({3, 1})));
diff --git a/tensorflow/python/ops/math_grad_test.py b/tensorflow/python/ops/math_grad_test.py
index 525cbd202c..178b90288d 100644
--- a/tensorflow/python/ops/math_grad_test.py
+++ b/tensorflow/python/ops/math_grad_test.py
@@ -214,7 +214,7 @@ class UnsafeDivGradientTest(test.TestCase):

   def testBasicGradient(self):
     inputs = constant_op.constant(np.arange(-3, 3), dtype=dtypes.float32)
-    outputs = math_ops.unsafe_div(inputs, 1 + abs(inputs))
+    outputs = math_ops.unsafe_div(inputs, 1 + math_ops.abs(inputs))
     with self.test_session():
       error = gradient_checker.compute_gradient_error(
           inputs, inputs.get_shape().as_list(),
--
cgit v1.2.3

From fee3f260d6eba1aec57df09045459790dcae686f Mon Sep 17 00:00:00 2001
From: "Yan Facai (颜发才)"
Date: Mon, 30 Jul 2018 13:17:21 +0800
Subject: TST: add test case, division by zero

---
 tensorflow/cc/gradients/math_grad_test.cc | 33 +++++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 6 deletions(-)

(limited to 'tensorflow/cc')

diff --git a/tensorflow/cc/gradients/math_grad_test.cc b/tensorflow/cc/gradients/math_grad_test.cc
index b76478d78b..27021e28f8 100644
--- a/tensorflow/cc/gradients/math_grad_test.cc
+++ b/tensorflow/cc/gradients/math_grad_test.cc
@@ -13,8 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/

+#include "tensorflow/cc/client/client_session.h"
 #include "tensorflow/cc/framework/grad_op_registry.h"
 #include "tensorflow/cc/framework/gradient_checker.h"
+#include "tensorflow/cc/framework/gradients.h"
 #include "tensorflow/cc/framework/testutil.h"
 #include "tensorflow/cc/gradients/grad_testutil.h"
 #include "tensorflow/cc/ops/standard_ops.h"
@@ -857,12 +859,31 @@ TEST_F(NaryGradTest, RealDiv) {
 }

 TEST_F(NaryGradTest, UnsafeDiv) {
-  TensorShape x_shape({3, 2, 5});
-  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
-  // Test x / (1 + |x|) rather than x_1 / x_2 to avoid triggering large
-  // division errors in the numeric estimator used by the gradient checker.
-  auto y = UnsafeDiv(scope_, x, Add(scope_, Const(scope_, 1), Abs(scope_, x)));
-  RunTest({x}, {x_shape}, {y}, {x_shape});
+  {
+    TensorShape x_shape({3, 2, 5});
+    const auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
+    // Test x / (1 + |x|) rather than x_1 / x_2 to avoid triggering large
+    // division errors in the numeric estimator used by the gradient checker.
+    const auto y = UnsafeDiv(scope_, x, Add(scope_, Const(scope_, 1), Abs(scope_, x)));
+    RunTest({x}, {x_shape}, {y}, {x_shape});
+  }
+  {
+    // Return 0 gradient (rather than NaN) for division by zero.
+    const auto x = Placeholder(scope_, DT_FLOAT);
+    const auto zero = Const(scope_, 0.0);
+    const auto y = UnsafeDiv(scope_, x, zero);
+
+    std::vector<Output> grad_outputs;
+    TF_EXPECT_OK(AddSymbolicGradients(scope_, {y}, {x}, &grad_outputs));
+    ClientSession session(scope_);
+    std::vector<Tensor> grad_result;
+    TF_EXPECT_OK(session.Run({{x, {-3.0f, 0.0f, 3.0f}}}, grad_outputs, &grad_result));
+    EXPECT_EQ(grad_result.size(), 1);
+    EXPECT_EQ(grad_result[0].NumElements(), 3);
+    EXPECT_EQ(grad_result[0].flat<float>()(0), 0.0f);
+    EXPECT_EQ(grad_result[0].flat<float>()(1), 0.0f);
+    EXPECT_EQ(grad_result[0].flat<float>()(2), 0.0f);
+  }
 }

 TEST_F(NaryGradTest, SquaredDifference) {
--
cgit v1.2.3
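
A note on the gradient rule these patches implement: for y = x_1 / x_2 the partials are dy/dx_1 = 1/x_2 and dy/dx_2 = -x_1/x_2^2, and UnsafeDivGrad computes both of them with UnsafeDiv itself, so they come out as 0 rather than NaN wherever x_2 == 0; that is exactly what the new division-by-zero test checks. Below is a minimal NumPy sketch of that behaviour, not TensorFlow code: the unsafe_div/unsafe_div_grad helper names are illustrative, the zero-for-zero forward rule is assumed from the op's tests, and the sketch ignores the broadcast reduction that BinaryGradCommon performs.

import numpy as np

def unsafe_div(x1, x2):
    # Assumed forward rule: x1 / x2, but 0 wherever x2 == 0.
    safe_x2 = np.where(x2 == 0.0, 1.0, x2)
    return np.where(x2 == 0.0, 0.0, x1 / safe_x2)

def unsafe_div_grad(x1, x2, dy):
    # Mirrors gx_1 = UnsafeDiv(dy, x_2) and
    # gx_2 = dy * UnsafeDiv(UnsafeDiv(-x_1, x_2), x_2) from UnsafeDivGrad.
    gx1 = unsafe_div(dy, x2)
    gx2 = dy * unsafe_div(unsafe_div(-x1, x2), x2)
    return gx1, gx2

x1 = np.array([-3.0, 0.0, 3.0])
x2 = np.zeros_like(x1)
print(unsafe_div_grad(x1, x2, np.ones_like(x1)))
# Both gradients are all zeros, matching the ClientSession test in the last patch.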