From 3c83ef9fbc8dc23ab0878cffa13ecbfd07ac70e5 Mon Sep 17 00:00:00 2001
From: "Yan Facai (颜发才)"
Date: Tue, 14 Aug 2018 14:08:15 +0800
Subject: CLN: rename UnsafeDiv => DivNoNan

---
 tensorflow/cc/gradients/math_grad.cc      | 15 +++++++--------
 tensorflow/cc/gradients/math_grad_test.cc |  8 ++++----
 2 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/tensorflow/cc/gradients/math_grad.cc b/tensorflow/cc/gradients/math_grad.cc
index c6e60689fa..cd215f740d 100644
--- a/tensorflow/cc/gradients/math_grad.cc
+++ b/tensorflow/cc/gradients/math_grad.cc
@@ -441,21 +441,20 @@ Status RealDivGrad(const Scope& scope, const Operation& op,
 }
 REGISTER_GRADIENT_OP("RealDiv", RealDivGrad);
 
-Status UnsafeDivGrad(const Scope& scope, const Operation& op,
-                     const std::vector<Output>& grad_inputs,
-                     std::vector<Output>* grad_outputs) {
+Status DivNoNanGrad(const Scope& scope, const Operation& op,
+                    const std::vector<Output>& grad_inputs,
+                    std::vector<Output>* grad_outputs) {
   auto x_1 = ConjugateHelper(scope, op.input(0));
   auto x_2 = ConjugateHelper(scope, op.input(1));
   // y = x_1 / x_2
   // dy/dx_1 = 1/x_2
   // dy/dx_2 = -x_1/x_2^2
-  auto gx_1 = UnsafeDiv(scope, grad_inputs[0], x_2);
-  auto gx_2 =
-      Mul(scope, grad_inputs[0],
-          UnsafeDiv(scope, UnsafeDiv(scope, Neg(scope, x_1), x_2), x_2));
+  auto gx_1 = DivNoNan(scope, grad_inputs[0], x_2);
+  auto gx_2 = Mul(scope, grad_inputs[0],
+                  DivNoNan(scope, DivNoNan(scope, Neg(scope, x_1), x_2), x_2));
   return BinaryGradCommon(scope, op, grad_outputs, gx_1, gx_2);
 }
-REGISTER_GRADIENT_OP("UnsafeDiv", UnsafeDivGrad);
+REGISTER_GRADIENT_OP("DivNoNan", DivNoNanGrad);
 
 Status SquaredDifferenceGrad(const Scope& scope, const Operation& op,
                              const std::vector<Output>& grad_inputs,
diff --git a/tensorflow/cc/gradients/math_grad_test.cc b/tensorflow/cc/gradients/math_grad_test.cc
index 12a19bcf28..147428cc39 100644
--- a/tensorflow/cc/gradients/math_grad_test.cc
+++ b/tensorflow/cc/gradients/math_grad_test.cc
@@ -33,6 +33,7 @@ using ops::AddN;
 using ops::BatchMatMul;
 using ops::Const;
 using ops::Div;
+using ops::DivNoNan;
 using ops::MatMul;
 using ops::Max;
 using ops::Maximum;
@@ -47,7 +48,6 @@ using ops::RealDiv;
 using ops::SquaredDifference;
 using ops::Sub;
 using ops::Sum;
-using ops::UnsafeDiv;
 using ops::Where3;
 
 // TODO(andydavis) Test gradient function against numeric gradients output.
@@ -854,13 +854,13 @@ TEST_F(NaryGradTest, RealDiv) {
   RunTest({x}, {x_shape}, {y}, {x_shape});
 }
 
-TEST_F(NaryGradTest, UnsafeDiv) {
+TEST_F(NaryGradTest, DivNoNan) {
   {
     TensorShape x_shape({3, 2, 5});
     const auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
     // Test x / (1 + |x|) rather than x_1 / x_2 to avoid triggering large
     // division errors in the numeric estimator used by the gradient checker.
-    const auto y = UnsafeDiv(
+    const auto y = DivNoNan(
         scope_, x, Add(scope_, Const(scope_, 1), Abs(scope_, x)));
     RunTest({x}, {x_shape}, {y}, {x_shape});
   }
@@ -868,7 +868,7 @@
     // Return 0 gradient (rather than NaN) for division by zero.
     const auto x = Placeholder(scope_, DT_FLOAT);
     const auto zero = Const(scope_, 0.0);
-    const auto y = UnsafeDiv(scope_, x, zero);
+    const auto y = DivNoNan(scope_, x, zero);
 
     std::vector<Output> grad_outputs;
     TF_EXPECT_OK(AddSymbolicGradients(scope_, {y}, {x}, &grad_outputs));
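
For context on the rename: DivNoNan keeps the "return 0 instead of NaN/Inf on a
zero denominator" semantics, and DivNoNanGrad above reuses the op on itself so
the gradient is likewise 0 where the denominator is 0. Below is a minimal
sketch (not part of the patch) of the registered gradient in use; it assumes a
tensorflow/cc build that includes this commit, and the session plumbing and
test values are illustrative only.

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/ops/standard_ops.h"

int main() {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();

  // One zero denominator: DivNoNan yields 0 there instead of Inf/NaN.
  auto x = tensorflow::ops::Const(scope, {1.0f, 2.0f, 3.0f});
  auto d = tensorflow::ops::Const(scope, {2.0f, 0.0f, 4.0f});
  auto y = tensorflow::ops::DivNoNan(scope, x, d);

  // Resolved through REGISTER_GRADIENT_OP("DivNoNan", DivNoNanGrad) above:
  // dy/dx = DivNoNan(1, d), so the gradient is also 0 (not Inf) at d == 0.
  std::vector<tensorflow::Output> grads;
  TF_CHECK_OK(tensorflow::AddSymbolicGradients(scope, {y}, {x}, &grads));

  tensorflow::ClientSession session(scope);
  std::vector<tensorflow::Tensor> out;
  TF_CHECK_OK(session.Run({y, grads[0]}, &out));
  // Expected: y = {0.5, 0, 0.75}, dy/dx = {0.5, 0, 0.25}.
  LOG(INFO) << out[0].DebugString() << " / " << out[1].DebugString();
  return 0;
}

This mirrors the second block of NaryGradTest.DivNoNan, which checks the same
division-by-zero case symbolically rather than through the numeric gradient
checker.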