author A. Unique TensorFlower <gardener@tensorflow.org> 2018-08-06 08:47:03 -0700
committer TensorFlower Gardener <gardener@tensorflow.org> 2018-08-06 08:54:08 -0700
commit 291f29b794bd0b782f4d8717bd29bda206060457 (patch)
tree 817ffeea76f3a5c3ae7385ffab9a254286072328 /tensorflow/cc
parent e01ad771da72a9f1aa328b0edd928573f08a237b (diff)
Handle NaN results when verifying gradients.
This change returns NaN as the maximum error if any gradient entry is NaN. This avoids masking gradient implementation errors in tests that don't expect to see NaN results.

PiperOrigin-RevId: 207551265
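A minimal standalone sketch of the intended reduction, independent of TensorFlow's actual API (the helper name and container are hypothetical): fold per-entry errors into a maximum, but surface NaN immediately rather than letting the comparison drop it.

  #include <algorithm>
  #include <cmath>
  #include <vector>

  // Hypothetical helper: reduce per-entry gradient errors to a single
  // maximum, propagating NaN instead of silently discarding it.
  double MaxErrorPropagatingNan(const std::vector<double>& errors) {
    double max_error = 0.0;
    for (double e : errors) {
      if (std::isnan(e)) return e;  // NaN wins: report it as the max error.
      max_error = std::max(max_error, e);
    }
    return max_error;
  }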
Diffstat (limited to 'tensorflow/cc')
-rw-r--r-- tensorflow/cc/framework/gradient_checker.cc      |  9
-rw-r--r-- tensorflow/cc/framework/gradient_checker_test.cc | 16
2 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/tensorflow/cc/framework/gradient_checker.cc b/tensorflow/cc/framework/gradient_checker.cc
index a1eb0d9d08..e9f9c59e3a 100644
--- a/tensorflow/cc/framework/gradient_checker.cc
+++ b/tensorflow/cc/framework/gradient_checker.cc
@@ -351,7 +351,14 @@ Status ComputeGradientErrorInternal(const Scope& scope, const OutputList& xs,
auto jac_n = jacobian_ns[i].matrix<JAC_T>();
for (int r = 0; r < jacobian_ts[i].dim_size(0); ++r) {
for (int c = 0; c < jacobian_ts[i].dim_size(1); ++c) {
- *max_error = std::max(*max_error, std::fabs(jac_t(r, c) - jac_n(r, c)));
+ auto cur_error = std::fabs(jac_t(r, c) - jac_n(r, c));
+ // Treat any NaN as max_error and immediately return.
+ // (Note that std::max may ignore NaN arguments.)
+ if (std::isnan(cur_error)) {
+ *max_error = cur_error;
+ return Status::OK();
+ }
+ *max_error = std::max(*max_error, cur_error);
}
}
}
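The comment about std::max above is the crux of the fix: std::max(a, b) is defined as (a < b) ? b : a, and any comparison involving NaN is false, so whether a NaN survives depends on argument order. A small self-contained demonstration (plain C++, not part of the patch):

  #include <algorithm>
  #include <cstdio>
  #include <limits>

  int main() {
    const double nan = std::numeric_limits<double>::quiet_NaN();
    // (1.0 < nan) is false, so std::max returns its first argument
    // and the NaN is silently dropped -- the old masking bug.
    std::printf("%f\n", std::max(1.0, nan));  // prints 1.000000
    // (nan < 1.0) is also false, so here the NaN happens to survive.
    std::printf("%f\n", std::max(nan, 1.0));  // prints nan
    return 0;
  }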
diff --git a/tensorflow/cc/framework/gradient_checker_test.cc b/tensorflow/cc/framework/gradient_checker_test.cc
index d4f0a7f5ab..8dd762c282 100644
--- a/tensorflow/cc/framework/gradient_checker_test.cc
+++ b/tensorflow/cc/framework/gradient_checker_test.cc
@@ -28,12 +28,14 @@ namespace {
using ops::Complex;
using ops::Const;
+using ops::Div;
using ops::MatMul;
using ops::Placeholder;
using ops::Real;
using ops::Split;
using ops::Square;
using ops::Stack;
+using ops::Sub;
using ops::Unstack;
TEST(GradientCheckerTest, BasicFloat) {
@@ -104,6 +106,20 @@ TEST(GradientCheckerTest, Complex64ToFloat) {
EXPECT_LT(max_error, 1e-4);
}
+// When calculating gradients that are undefined, verify that we get NaN
+// as the computed error rather than 0.
+TEST(GradientCheckerTest, BasicNan) {
+ Scope scope = Scope::NewRootScope();
+ TensorShape shape({2, 4, 3});
+ auto x = Placeholder(scope, DT_FLOAT, Placeholder::Shape(shape));
+ // y = x/(x-x) should always return NaN
+ auto y = Div(scope, x, Sub(scope, x, x));
+ float max_error;
+ TF_ASSERT_OK((ComputeGradientError<float, float, float>(
+ scope, {x}, {shape}, {y}, {shape}, &max_error)));
+ EXPECT_TRUE(std::isnan(max_error));
+}
+
TEST(GradientCheckerTest, MatMulGrad) {
Scope scope = Scope::NewRootScope();
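To exercise the new test locally, an invocation along these lines should work; the Bazel target name is an assumption based on the framework_*_test naming convention in tensorflow/cc/BUILD:

  bazel test //tensorflow/cc:framework_gradient_checker_test \
      --test_arg=--gtest_filter=GradientCheckerTest.BasicNan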