From 23eccfb17635bce1c19b668986dceae1281ccee8 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower" <gardener@tensorflow.org>
Date: Sat, 6 Feb 2016 21:38:25 -0800
Subject: Adds c++ grad for ExpandDims and Transpose.

Change: 114043957
---
 tensorflow/core/ops/array_grad.cc      |  20 ++++
 tensorflow/core/ops/array_grad_test.cc | 119 +++++++++++++++++++++++++++++++++
 2 files changed, 139 insertions(+)

diff --git a/tensorflow/core/ops/array_grad.cc b/tensorflow/core/ops/array_grad.cc
index 45c1152a33..4bd50c2aef 100644
--- a/tensorflow/core/ops/array_grad.cc
+++ b/tensorflow/core/ops/array_grad.cc
@@ -47,6 +47,7 @@ Status ReshapeGrad(const AttrSlice& attrs, FunctionDef* g) {
   return Status::OK();
 }
 REGISTER_OP_GRADIENT("Reshape", ReshapeGrad);
+REGISTER_OP_GRADIENT("ExpandDims", ReshapeGrad);
 
 Status IdentityGrad(const AttrSlice& attrs, FunctionDef* g) {
   // clang-format off
@@ -260,4 +261,23 @@ Status FillGrad(const AttrSlice& attrs, FunctionDef* g) {
 }
 REGISTER_OP_GRADIENT("Fill", FillGrad);
 
+Status TransposeGrad(const AttrSlice& attrs, FunctionDef* g) {
+  *g = FDH::Define(
+      // Arg defs
+      {"x: T", "p: int32", "dy: T"},
+      // Ret val defs
+      {"dx: T", "dp: int32"},
+      // Attr defs
+      {"T: {float, double}"},
+      // Nodes
+      {
+          {{"q"}, "InvertPermutation", {"p"}, {}},
+          {{"dx"}, "Transpose", {"dy", "q"}, {{"T", "$T"}}},
+          {{"dp"}, "ZerosLike", {"p"}, {{"T", DT_INT32}}},
+      });
+  VLOG(1) << "TransposeGrad " << DebugString(*g);
+  return Status::OK();
+}
+REGISTER_OP_GRADIENT("Transpose", TransposeGrad);
+
 }  // end namespace tensorflow
diff --git a/tensorflow/core/ops/array_grad_test.cc b/tensorflow/core/ops/array_grad_test.cc
index 9f6ff6f840..401286ace1 100644
--- a/tensorflow/core/ops/array_grad_test.cc
+++ b/tensorflow/core/ops/array_grad_test.cc
@@ -197,4 +197,123 @@ TEST_F(ArrayGradTest, SplitGrad) {
                        {2, 4, 5}));
 }
 
+std::vector<Tensor> ReshapeGrad(const Tensor& x, const Tensor& s,
+                                const Tensor& dy) {
+  auto T = DT_FLOAT;
+  auto gdef = test::function::GDef(
+      {f::NDef("x", "Placeholder", {}, {{"dtype", T}}),
+       
f::NDef("s", "Placeholder", {}, {{"dtype", DT_INT32}}),
+       f::NDef("dy", "Placeholder", {}, {{"dtype", T}}),
+       f::NDef("dx", "SymbolicGradient", {"x", "s", "dy"},
+               {{"f", FDH::FunctionRef("Reshape", {{"T", T}})},
+                {"Tin", DataTypeSlice{T, DT_INT32, T}},
+                {"Tout", DataTypeSlice{T, DT_INT32}}})});
+  VLOG(1) << DebugStringWhole(gdef);
+  auto sess = NewSession();
+  TF_CHECK_OK(sess->Create(gdef));
+  std::vector<Tensor> out;
+  TF_CHECK_OK(sess->Run({{"x:0", x}, {"s:0", s}, {"dy:0", dy}},
+                        {"dx:0", "dx:1"}, {}, &out));
+  CHECK_EQ(out.size(), 2);
+  TF_CHECK_OK(sess->Close());
+  delete sess;
+  return out;
+}
+
+TEST_F(ArrayGradTest, ReshapeGrad) {
+  Tensor x(DT_FLOAT, {2, 4, 5});
+  x.flat<float>().setZero();
+  auto s = test::AsTensor<int32>({8, 5});
+  Tensor dy(DT_FLOAT, {8, 5});
+  test::FillIota<float>(&dy, 73);
+  auto dx = ReshapeGrad(x, s, dy);
+  test::ExpectClose(
+      dx[0], test::AsTensor<float>(
+                 {73., 74., 75., 76., 77., 78., 79., 80., 81., 82.,
+                  83., 84., 85., 86., 87., 88., 89., 90., 91., 92.,
+                  93., 94., 95., 96., 97., 98., 99., 100., 101., 102.,
+                  103., 104., 105., 106., 107., 108., 109., 110., 111., 112.},
+                 {2, 4, 5}));
+  test::ExpectTensorEqual<int32>(dx[1], test::AsTensor<int32>({0, 0}));
+}
+
+std::vector<Tensor> ExpandDimsGrad(const Tensor& x, const Tensor& s,
+                                   const Tensor& dy) {
+  auto T = DT_FLOAT;
+  auto gdef = test::function::GDef(
+      {f::NDef("x", "Placeholder", {}, {{"dtype", T}}),
+       f::NDef("s", "Placeholder", {}, {{"dtype", DT_INT32}}),
+       f::NDef("dy", "Placeholder", {}, {{"dtype", T}}),
+       f::NDef("dx", "SymbolicGradient", {"x", "s", "dy"},
+               {{"f", FDH::FunctionRef("ExpandDims", {{"T", T}})},
+                {"Tin", DataTypeSlice{T, DT_INT32, T}},
+                {"Tout", DataTypeSlice{T, DT_INT32}}})});
+  VLOG(1) << DebugStringWhole(gdef);
+  auto sess = NewSession();
+  TF_CHECK_OK(sess->Create(gdef));
+  std::vector<Tensor> out;
+  TF_CHECK_OK(sess->Run({{"x:0", x}, {"s:0", s}, {"dy:0", dy}},
+                        {"dx:0", "dx:1"}, {}, &out));
+  CHECK_EQ(out.size(), 2);
+  TF_CHECK_OK(sess->Close());
+  delete sess;
+  return out;
+}
+
+TEST_F(ArrayGradTest, 
ExpandDimsGrad) {
+  Tensor x(DT_FLOAT, {2, 4, 5});
+  x.flat<float>().setZero();
+  auto s = test::AsTensor<int32>({1});
+  Tensor dy(DT_FLOAT, {2, 1, 4, 5});
+  test::FillIota<float>(&dy, 73);
+  auto dx = ExpandDimsGrad(x, s, dy);
+  test::ExpectClose(
+      dx[0], test::AsTensor<float>(
+                 {73., 74., 75., 76., 77., 78., 79., 80., 81., 82.,
+                  83., 84., 85., 86., 87., 88., 89., 90., 91., 92.,
+                  93., 94., 95., 96., 97., 98., 99., 100., 101., 102.,
+                  103., 104., 105., 106., 107., 108., 109., 110., 111., 112.},
+                 {2, 4, 5}));
+  test::ExpectTensorEqual<int32>(dx[1], test::AsTensor<int32>({0}));
+}
+
+std::vector<Tensor> TransposeGrad(const Tensor& x, const Tensor& p,
+                                  const Tensor& dy) {
+  auto T = DT_FLOAT;
+  auto gdef = test::function::GDef(
+      {f::NDef("x", "Placeholder", {}, {{"dtype", T}}),
+       f::NDef("p", "Placeholder", {}, {{"dtype", DT_INT32}}),
+       f::NDef("dy", "Placeholder", {}, {{"dtype", T}}),
+       f::NDef("dx", "SymbolicGradient", {"x", "p", "dy"},
+               {{"f", FDH::FunctionRef("Transpose", {{"T", T}})},
+                {"Tin", DataTypeSlice{T, DT_INT32, T}},
+                {"Tout", DataTypeSlice{T, DT_INT32}}})});
+  VLOG(1) << DebugStringWhole(gdef);
+  auto sess = NewSession();
+  TF_CHECK_OK(sess->Create(gdef));
+  std::vector<Tensor> out;
+  TF_CHECK_OK(sess->Run({{"x:0", x}, {"p:0", p}, {"dy:0", dy}},
+                        {"dx:0", "dx:1"}, {}, &out));
+  CHECK_EQ(out.size(), 2);
+  TF_CHECK_OK(sess->Close());
+  delete sess;
+  return out;
+}
+
+TEST_F(ArrayGradTest, TransposeGrad) {
+  Tensor x(DT_FLOAT, {2, 4, 5});
+  x.flat<float>().setZero();
+  auto p = test::AsTensor<int32>({2, 0, 1});
+  Tensor dy(DT_FLOAT, {5, 2, 4});
+  test::FillIota<float>(&dy, 0);
+  auto dx = TransposeGrad(x, p, dy);
+  test::ExpectClose(dx[0], test::AsTensor<float>(
+                               {0., 8., 16., 24., 32., 1., 9., 17., 25., 33.,
+                                2., 10., 18., 26., 34., 3., 11., 19., 27., 35.,
+                                4., 12., 20., 28., 36., 5., 13., 21., 29., 37.,
+                                6., 14., 22., 30., 38., 7., 15., 23., 31., 39.},
+                               {2, 4, 5}));
+  test::ExpectTensorEqual<int32>(dx[1], test::AsTensor<int32>({0, 0, 0}));
+}
+
 }  // namespace tensorflow
-- 
cgit v1.2.3