author    Shanqing Cai <cais@google.com>    2017-09-25 19:35:53 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>    2017-09-25 19:39:42 -0700
commit    e2e3a943c0a28b7656325acb3fcd035743d55ea0 (patch)
tree      f4b909d5410bdf3b94012392909e7805cd27a2a7 /tensorflow/cc/framework
parent    df22044be98c8b707601e03fe22ded53bcc28c7e (diff)
Merge changes from github.
END_PUBLIC

--- Commit 1e1b3d902 authored by Pete Warden <pete@petewarden.com>, committed by gunan <gunan@google.com>:
Changed output directory for Pi CI build to fix permissions problem with nightlies (#13257)
* Fix for RTLD_GLOBAL breakage of Pi builds, and removed Eigen version change for Pi that's no longer needed
* Fixed Pi Zero OpenBLAS build problems and tidied up directories used
* More robust checks in Pi build script
* Changed output directory for Pi CI build to fix permissions problem

--- Commit fe3a2e65c authored by Yan Facai (???) <facai.yan@gmail.com>, committed by drpngx <drpngx@users.noreply.github.com>:
Check invalid string type for dest_nodes in extract_sub_graph (#13057)
* BUG: check str type
* TST: add unit test
* CLN: remove list check
* CLN: use warning
* CLN: 2 indent
* CLN: raise TypeError if not list
* CLN: check string only

--- Commit 225ab7629 authored by Jean Wanka <jm.wanka@gmail.com>, committed by Jean Wanka <jm.wanka@gmail.com>:
Fix polynomial decay with cycle for global_step=0.
For polynomial decay with cycle=True, the learning rate at step 0 becomes NaN because computing it involves a division by 0. This change should fix it by setting the multiplier for the decay steps to one when global_step=0 (a short sketch of the divide-by-zero and the fix follows this commit list).

--- Commit 286f57061 authored by Bjarke Hammersholt Roune <broune@google.com>, committed by TensorFlower Gardener <gardener@tensorflow.org>:
Make Service::TransferToClient not attempt to manipulate the literal when the transfer failed, preventing a crash and allowing the caller to see the reason for the failed transfer.
PiperOrigin-RevId: 169770126

--- Commit e0501bc4d authored by Yong Tang <yong.tang.github@outlook.com>, committed by Shanqing Cai <cais@google.com>:
Fix GRUBlockCell parameter naming inconsistency (#13153)
* Fix GRUBlockCell parameter naming inconsistency. This fix addresses the issue in 13137 where the parameter `cell_size` is used instead of `num_units`, which is inconsistent with other RNN cells. It adds support for `num_units` while maintaining backward compatibility with `cell_size`. This fix fixes 13137. Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
* Add `@deprecated_args` for 'cell_size' in `GRUBlockCell`. Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
* Address review comment. Signed-off-by: Yong Tang <yong.tang.github@outlook.com>

--- Commit 02a2eba05 authored by Pete Warden <pete@petewarden.com>, committed by gunan <gunan@google.com>:
Fix for RTLD_GLOBAL breakage of Pi builds, and removed Eigen version change that's no longer needed (#13251)
* Fix for RTLD_GLOBAL breakage of Pi builds, and removed Eigen version change for Pi that's no longer needed
* Fixed Pi Zero OpenBLAS build problems and tidied up directories used
* More robust checks in Pi build script

--- Commit 8ef722253 authored by Sanjoy Das <sanjoy@google.com>, committed by TensorFlower Gardener <gardener@tensorflow.org>:
Remove a redundant setName. The EmitComputation should have emitted a function with the right name, so use a CHECK instead.
PiperOrigin-RevId: 169764856

--- Commit 1b94147dc authored by Neal Wu <wun@google.com>, committed by TensorFlower Gardener <gardener@tensorflow.org>:
Fix broken GitHub links in tensorflow and tensorflow_models resulting from The Great Models Move (a.k.a. the research subfolder).
PiperOrigin-RevId: 169763373

--- Commit b1ada5f0c authored by Justine Tunney <jart@google.com>, committed by TensorFlower Gardener <gardener@tensorflow.org>:
Fix TensorBoard `python -m` invocation in docs.
PiperOrigin-RevId: 169758752

--- Commit 2957cd894 authored by Mustafa Ispir <ispir@google.com>, committed by TensorFlower Gardener <gardener@tensorflow.org>:
Local run option of estimator training.
PiperOrigin-RevId: 169756384

--- Commit 1dc2fe7ac authored by Gunhan Gulsoy <gunan@google.com>, committed by TensorFlower Gardener <gardener@tensorflow.org>:
BEGIN_PUBLIC
Automated g4 rollback of changelist 166264198

PiperOrigin-RevId: 169998124
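As a side note on the polynomial-decay commit above, here is a minimal sketch of why cycle=True produced NaN at global_step=0 and how clamping the decay-steps multiplier to one avoids it. This is not TensorFlow code; the function name and the exact formula are assumptions based on the usual polynomial_decay definition described in the commit message.

// Sketch only: standalone C++ reproduction of the described behavior.
#include <algorithm>
#include <cmath>
#include <cstdio>

double PolynomialDecay(double lr0, double lr_end, double step,
                       double decay_steps, double power, bool cycle) {
  if (cycle) {
    // Before the fix: at step == 0, ceil(0 / decay_steps) == 0, so the
    // effective decay_steps becomes 0 and (1 - step / decay_steps) is NaN.
    // The fix clamps the multiplier to at least one.
    double multiplier = std::max(std::ceil(step / decay_steps), 1.0);
    decay_steps *= multiplier;
  } else {
    step = std::min(step, decay_steps);
  }
  return (lr0 - lr_end) * std::pow(1.0 - step / decay_steps, power) + lr_end;
}

int main() {
  // With the clamp, step 0 yields the initial learning rate (0.1) instead of NaN.
  std::printf("lr(0) = %f\n", PolynomialDecay(0.1, 0.01, 0.0, 1000.0, 1.0, true));
  return 0;
}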
Diffstat (limited to 'tensorflow/cc/framework')
-rw-r--r--  tensorflow/cc/framework/gradients.cc       | 10
-rw-r--r--  tensorflow/cc/framework/gradients_test.cc  | 44
2 files changed, 41 insertions, 13 deletions
diff --git a/tensorflow/cc/framework/gradients.cc b/tensorflow/cc/framework/gradients.cc
index 82469261e5..b665ce744d 100644
--- a/tensorflow/cc/framework/gradients.cc
+++ b/tensorflow/cc/framework/gradients.cc
@@ -175,8 +175,14 @@ Status SymbolicGradientBuilder::Initialize() {
"Must specify a gradient input for each output.");
}
std::vector<bool> reachable_nodes = GetReachableNodes();
- // TODO(theflofly) Check that inputs_ are reachable from
- // outputs_ using reachable_nodes
+ for (const Output& input : inputs_) {
+ if (!reachable_nodes[input.node()->id()]) {
+ return errors::InvalidArgument(
+ "Cannot compute the partial derivative for node '",
+ input.node()->name(),
+ "' as it's unreachable from the output node(s).");
+ }
+ }
grad_outputs_->clear();
grad_outputs_->resize(inputs_.size());
// Populate `output_nodes_` from node ids in `outputs_`.
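For context on the hunk above, a minimal, hypothetical caller-side sketch (it mirrors the UnreachableInput test added further down in gradients_test.cc): with this change, requesting a gradient with respect to a node that does not feed the requested output fails fast with an InvalidArgument status instead of proceeding.

// Sketch only: assumes the TensorFlow C++ API in tensorflow/cc.
#include <iostream>
#include <vector>

#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"

int main() {
  using namespace tensorflow;
  using namespace tensorflow::ops;

  Scope scope = Scope::NewRootScope();
  auto x = Const(scope, {{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}});
  auto y = Const(scope, {{1.0}, {2.0}, {3.0}});
  auto z = Const(scope.WithOpName("z"), {{9.0, 10.0, 11.0}});
  auto m = MatMul(scope, x, y);  // 'z' never feeds into 'm'.

  // Asking for d(m)/d(z) should now return INVALID_ARGUMENT with the
  // "unreachable from the output node(s)" message added in this commit.
  std::vector<Output> grad_outputs;
  Status s = AddSymbolicGradients(scope, {m}, {z}, &grad_outputs);
  std::cout << s.ToString() << std::endl;
  return 0;
}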
diff --git a/tensorflow/cc/framework/gradients_test.cc b/tensorflow/cc/framework/gradients_test.cc
index 032ab93623..dcaf10c340 100644
--- a/tensorflow/cc/framework/gradients_test.cc
+++ b/tensorflow/cc/framework/gradients_test.cc
@@ -48,9 +48,9 @@ class GradientsTest : public ::testing::Test {
Scope scope_test_;
};
-// EX.
+// Example:
// ^ ^
-// dy| dx| // MatMul Gradient Graph
+// dy| dx| (MatMul Gradient Graph)
// | |
// MatMul_1 MatMul_2
// ^ ^ ^ ^
@@ -61,7 +61,7 @@ class GradientsTest : public ::testing::Test {
// | Const_3 |
// | |
// | ^ |
-// | z| | // MatMul Forward Graph
+// | z| | (MatMul Forward Graph)
// | | |
// | MatMul_0 |
// | / \ |
@@ -373,24 +373,22 @@ TEST_F(GradientsTest, UnreachableEdgeGradOneOutput) {
auto y_const = Const(scope_test_, {{1.0}, {2.0}, {3.0}});
auto y_assign = Assign(scope_test_, y, y_const);
- auto m1 = MatMul(scope_test_, x, y);
+ auto m = MatMul(scope_test_, x, y);
auto z = Variable(scope_test_, {1, 3}, DT_DOUBLE);
auto z_const = Const(scope_test_, {{9.0, 10.0, 11.0}});
auto z_assign = Assign(scope_test_, z, z_const);
- auto m2 = MatMul(scope_test_, y, z);
-
- auto dm1 = Const(scope_test_, {{0.5}, {0.5}});
+ auto diff_m = Const(scope_test_, {{0.5}, {0.5}});
std::vector<Output> grad_outputs;
TF_ASSERT_OK(
- AddSymbolicGradients(scope_test_, {m1}, {y}, {dm1}, &grad_outputs));
+ AddSymbolicGradients(scope_test_, {m}, {y}, {diff_m}, &grad_outputs));
std::vector<Tensor> outputs;
test::GetTensors(scope_test_, {x_assign, y_assign, z_assign},
{grad_outputs[0]}, &outputs);
- // dz/dy = xT * dm1
+ // dz/dy = xT * diff_m
test::ExpectTensorNear<double>(
outputs[0], test::AsTensor<double>({2.5, 3.5, 4.5}, {3, 1}), 1e-5);
}
@@ -424,13 +422,37 @@ TEST_F(GradientsTest, UnreachableEdgeGradTwoOutputs) {
test::GetTensors(scope_test_, {x_assign, y_assign, z_assign},
{grad_outputs[0]}, &outputs);
- // the gradients from m1 and m2 will be summed to compute the gradient
- // w.r.t y
+ // The gradients from m1 and m2 will be summed to compute the gradient
+ // w.r.t y:
// dz/dy = xT * dm1 + dm2 * zT
test::ExpectTensorNear<double>(
outputs[0], test::AsTensor<double>({17.5, 24.7, 26.8}, {3, 1}), 1e-5);
}
+TEST_F(GradientsTest, UnreachableInput) {
+ auto x = Const(scope_test_, {{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}});
+ auto y = Const(scope_test_, {{1.0}, {2.0}, {3.0}});
+ auto z = Const(scope_test_.WithOpName("z"), {{9.0, 10.0, 11.0}});
+
+ auto m1 = MatMul(scope_test_, x, y);
+ auto m2 = MatMul(scope_test_, y, z);
+ auto dm1 = Const(scope_test_, {{0.5}, {0.5}});
+
+ // From m1, z is unreachable, so an error status should be returned.
+ // m2 m1
+ // | |
+ // * *
+ // / \ / \
+ // z y x
+ std::vector<Output> grad_outputs;
+ Status status =
+ AddSymbolicGradients(scope_test_, {m1}, {z}, {dm1}, &grad_outputs);
+ EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
+ EXPECT_EQ(status.error_message(),
+ "Cannot compute the partial derivative"
+ " for node 'z' as it's unreachable from the output node(s).");
+}
+
// StopGradientSingleOutputMultiEdgeTest tests combinations of valid and
// 'NoGradient' (induced by StopGradient op) returned along multiple edges from
// a single node's output.