author    Suharsh Sivakumar <suharshs@google.com>  2016-11-07 16:39:37 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>  2016-11-08 16:14:03 -0800
commit    39f04d940e3624e17f8855ae6dd7c7bf55545b70 (patch)
tree      0bc52ab8fbee44c0053e6bb5d79a95c5326879b3 /tensorflow/cc/gradients
parent    70bd6c87f42bdba77290907f1b2444b23e6e33c3 (diff)
C++ gradient checker now supports multiple inputs and outputs.
Also added a few more array ops.

Change: 138456760
Diffstat (limited to 'tensorflow/cc/gradients')
-rw-r--r--  tensorflow/cc/gradients/array_grad.cc       | 44
-rw-r--r--  tensorflow/cc/gradients/array_grad_test.cc  | 44
-rw-r--r--  tensorflow/cc/gradients/nn_grad_test.cc     |  4
3 files changed, 89 insertions(+), 3 deletions(-)
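The heart of this change is the gradient checker's new list-based interface: ComputeGradientError now takes an OutputList of inputs and of outputs, plus their shapes, so ops with several inputs or outputs can be checked numerically. A minimal standalone sketch of the new call, modeled on the Split test added below (the gradient_checker.h header path, the status/logging includes, and the free function CheckSplitGradient are assumptions made for illustration):

#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"

namespace tensorflow {

// Sketch only: verifies the Split gradient registered in this change by
// comparing analytic and numeric Jacobians across one input and two outputs.
void CheckSplitGradient() {
  Scope scope = Scope::NewRootScope();
  TensorShape x_shape({5, 2});
  auto x = ops::Placeholder(scope, DT_FLOAT, ops::Placeholder::Shape(x_shape));
  // One input, two outputs: split x into two [5, 1] halves along dim 1.
  auto split_dim = ops::Const(scope, 1, {});
  auto y = ops::Split(scope, split_dim, x, /* num_split */ 2);
  TensorShape y_shape({5, 1});

  float max_error;
  TF_CHECK_OK(ComputeGradientError(scope, {x}, {x_shape}, y.output,
                                   {y_shape, y_shape}, &max_error));
  // The tests in this change treat 1e-4 as the acceptance threshold.
  CHECK_LT(max_error, 1e-4);
}

}  // namespace tensorflow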
diff --git a/tensorflow/cc/gradients/array_grad.cc b/tensorflow/cc/gradients/array_grad.cc
index 0160afaef1..42f1137aad 100644
--- a/tensorflow/cc/gradients/array_grad.cc
+++ b/tensorflow/cc/gradients/array_grad.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
+#include "tensorflow/cc/framework/gradients.h"
namespace tensorflow {
namespace ops {
@@ -25,6 +26,16 @@ namespace {
REGISTER_NO_GRADIENT_OP("Const");
REGISTER_NO_GRADIENT_OP("StopGradient");
+REGISTER_NO_GRADIENT_OP("ConcatOffset");
+REGISTER_NO_GRADIENT_OP("EditDistance");
+REGISTER_NO_GRADIENT_OP("ZerosLike");
+REGISTER_NO_GRADIENT_OP("InvertPermutation");
+REGISTER_NO_GRADIENT_OP("Shape");
+REGISTER_NO_GRADIENT_OP("ShapeN");
+REGISTER_NO_GRADIENT_OP("Rank");
+REGISTER_NO_GRADIENT_OP("Size");
+REGISTER_NO_GRADIENT_OP("BroadcastGradientArgs");
+REGISTER_NO_GRADIENT_OP("OneHot");
Status PackGrad(const Scope& scope, const Operation& op,
                const std::vector<Output>& grad_inputs,
@@ -69,6 +80,39 @@ Status RefIdentityGrad(const Scope& scope, const Operation& op,
}
REGISTER_GRADIENT_OP("RefIdentity", RefIdentityGrad);
+Status SplitGrad(const Scope& scope, const Operation& op,
+                 const std::vector<Output>& grad_inputs,
+                 std::vector<Output>* grad_outputs) {
+  grad_outputs->push_back(NoGradient());
+  grad_outputs->push_back(Concat(scope, op.input(0), grad_inputs));
+  return Status::OK();
+}
+REGISTER_GRADIENT_OP("Split", SplitGrad);
+
+Status DiagGrad(const Scope& scope, const Operation& op,
+                const std::vector<Output>& grad_inputs,
+                std::vector<Output>* grad_outputs) {
+  grad_outputs->push_back(DiagPart(scope, grad_inputs[0]));
+  return Status::OK();
+}
+REGISTER_GRADIENT_OP("Diag", DiagGrad);
+
+Status DiagPartGrad(const Scope& scope, const Operation& op,
+                    const std::vector<Output>& grad_inputs,
+                    std::vector<Output>* grad_outputs) {
+  grad_outputs->push_back(Diag(scope, grad_inputs[0]));
+  return Status::OK();
+}
+REGISTER_GRADIENT_OP("DiagPart", DiagPartGrad);
+
+Status MatrixDiagGrad(const Scope& scope, const Operation& op,
+                      const std::vector<Output>& grad_inputs,
+                      std::vector<Output>* grad_outputs) {
+  grad_outputs->push_back(MatrixDiagPart(scope, grad_inputs[0]));
+  return Status::OK();
+}
+REGISTER_GRADIENT_OP("MatrixDiag", MatrixDiagGrad);
+
} // anonymous namespace
} // namespace ops
} // namespace tensorflow
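Each new gradient follows the registry's calling convention: a gradient function receives the forward Operation together with one incoming gradient per forward output (grad_inputs), and must push exactly one entry onto grad_outputs per forward input, in input order, using NoGradient() for non-differentiable inputs such as Split's integer split_dim. Since Diag and DiagPart are mutually inverse linear ops, each one's gradient is simply the other applied to the incoming gradient. A skeleton of the contract, using a hypothetical op name "MyOp" for illustration:

#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/ops/standard_ops.h"

namespace tensorflow {
namespace ops {
namespace {

// Hypothetical one-input, one-output op; the Scope/Operation/grad_inputs
// contract matches the functions in array_grad.cc above.
Status MyOpGrad(const Scope& scope, const Operation& op,
                const std::vector<Output>& grad_inputs,
                std::vector<Output>* grad_outputs) {
  // grad_inputs[i] holds dL/d(output i) of the forward op. Push one Output
  // per forward input; use NoGradient() for inputs that are not
  // differentiable (e.g. integer axis or shape arguments).
  grad_outputs->push_back(Identity(scope, grad_inputs[0]));
  return Status::OK();
}
REGISTER_GRADIENT_OP("MyOp", MyOpGrad);

}  // anonymous namespace
}  // namespace ops
}  // namespace tensorflow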
diff --git a/tensorflow/cc/gradients/array_grad_test.cc b/tensorflow/cc/gradients/array_grad_test.cc
index 5935d14fb7..327d58bd6c 100644
--- a/tensorflow/cc/gradients/array_grad_test.cc
+++ b/tensorflow/cc/gradients/array_grad_test.cc
@@ -115,8 +115,16 @@ class ArrayGradTest : public ::testing::Test {
void RunTest(const Output& x, const TensorShape& x_shape, const Output& y,
             const TensorShape& y_shape) {
  float max_error;
+  TF_ASSERT_OK(ComputeGradientError(scope_, {x}, {x_shape}, {y}, {y_shape},
+                                    &max_error));
+  EXPECT_LT(max_error, 1e-4);
+}
+
+void RunTest(const OutputList& xs, const std::vector<TensorShape>& x_shapes,
+             const OutputList& ys, const std::vector<TensorShape>& y_shapes) {
+  float max_error;
  TF_ASSERT_OK(
-      ComputeGradientError(scope_, x, x_shape, y, y_shape, &max_error));
+      ComputeGradientError(scope_, xs, x_shapes, ys, y_shapes, &max_error));
  EXPECT_LT(max_error, 1e-4);
}
@@ -130,5 +138,39 @@ TEST_F(ArrayGradTest, IdentityGrad) {
  RunTest(x, shape, y, shape);
}
+TEST_F(ArrayGradTest, SplitGrad) {
+  TensorShape x_shape({5, 2});
+  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
+  // Split along the second dimension.
+  auto split_dim = Const(scope_, 1, {});
+  auto y = Split(scope_, split_dim, x, /* num_split */ 2);
+  TensorShape y_shape = TensorShape({5, 1});
+  RunTest({x}, {x_shape}, y.output, {y_shape, y_shape});
+}
+
+TEST_F(ArrayGradTest, DiagGrad) {
+  TensorShape x_shape({5, 2});
+  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
+  auto y = Diag(scope_, x);
+  TensorShape y_shape({5, 2, 5, 2});
+  RunTest(x, x_shape, y, y_shape);
+}
+
+TEST_F(ArrayGradTest, DiagPartGrad) {
+  TensorShape x_shape({5, 2, 5, 2});
+  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
+  auto y = DiagPart(scope_, x);
+  TensorShape y_shape({5, 2});
+  RunTest(x, x_shape, y, y_shape);
+}
+
+TEST_F(ArrayGradTest, MatrixDiagGrad) {
+  TensorShape x_shape({5, 2});
+  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
+  auto y = MatrixDiag(scope_, x);
+  TensorShape y_shape({5, 2, 2});
+  RunTest(x, x_shape, y, y_shape);
+}
+
} // namespace
} // namespace tensorflow
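Only SplitGrad's test exercises the new multi-output overload of RunTest; the other three still go through the single-pair wrapper. The overload covers multi-input ops the same way. A sketch, not part of this change, of how it could be applied to Pack, whose gradient already lives in this file (the wrapper name Pack and the test body are assumptions):

// Sketch only: two inputs, one output through the list-based RunTest.
TEST_F(ArrayGradTest, PackGradSketch) {
  TensorShape x_shape({2, 3});
  auto a = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  auto b = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  // Packing two [2, 3] tensors along a new leading axis yields [2, 2, 3].
  auto y = Pack(scope_, {a, b});
  RunTest({a, b}, {x_shape, x_shape}, {y}, {TensorShape({2, 2, 3})});
}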
diff --git a/tensorflow/cc/gradients/nn_grad_test.cc b/tensorflow/cc/gradients/nn_grad_test.cc
index ef0a2f9626..4e37d0d585 100644
--- a/tensorflow/cc/gradients/nn_grad_test.cc
+++ b/tensorflow/cc/gradients/nn_grad_test.cc
@@ -34,8 +34,8 @@ class NNGradTest : public ::testing::Test {
void RunTest(const Output& x, const TensorShape& x_shape, const Output& y,
             const TensorShape& y_shape) {
  float max_error;
-  TF_ASSERT_OK(
-      ComputeGradientError(scope_, x, x_shape, y, y_shape, &max_error));
+  TF_ASSERT_OK(ComputeGradientError(scope_, {x}, {x_shape}, {y}, {y_shape},
+                                    &max_error));
  EXPECT_LT(max_error, 1e-4);
}
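The nn_grad_test.cc change is purely mechanical: a braced initializer such as {x} constructs a one-element OutputList (a std::vector<Output>) in place, so existing single-input call sites translate directly to the list-based signature:

// Before this change (scalar signature, now removed):
//   ComputeGradientError(scope_, x, x_shape, y, y_shape, &max_error);
// After: wrap each argument in a one-element braced list.
ComputeGradientError(scope_, {x}, {x_shape}, {y}, {y_shape}, &max_error);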