/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include #include #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/session.h" namespace tensorflow { namespace { namespace f = test::function; using FDH = FunctionDefHelper; std::unique_ptr NewSession() { SessionOptions opts; (*opts.config.mutable_device_count())["CPU"] = 1; return std::unique_ptr(NewSession(opts)); } std::vector PackGrad(const Tensor& x0, const Tensor& x1, const Tensor& dy, int axis) { auto T = DT_FLOAT; auto gdef = test::function::GDef( {f::NDef("x0", "Placeholder", {}, {{"dtype", T}}), f::NDef("x1", "Placeholder", {}, {{"dtype", T}}), f::NDef("axis", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("dy", "Placeholder", {}, {{"dtype", T}}), f::NDef("dx", "SymbolicGradient", {"x0", "x1", "dy"}, {{"f", FDH::FunctionRef("Pack", {{"N", 2}, {"T", T}, {"axis", axis}})}, {"Tin", DataTypeSlice{T, T, T}}, {"Tout", DataTypeSlice{T, T}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run({{"x0:0", x0}, {"x1:0", x1}, {"axis:0", test::AsScalar(axis)}, {"dy:0", dy}}, {"dx:0", "dx:1"}, {}, &out)); CHECK_EQ(out.size(), 2); TF_CHECK_OK(sess->Close()); return out; } TEST(ArrayGradTest, PackGrad) { Tensor x0(DT_FLOAT, {2, 3}); x0.flat().setZero(); Tensor x1(DT_FLOAT, {2, 3}); x1.flat().setZero(); Tensor dy(DT_FLOAT, {2, 2, 3}); test::FillIota(&dy, 0); auto dx = PackGrad(x0, x1, dy, 0); test::ExpectClose(dx[0], test::AsTensor({0., 1., 2., 3., 4., 5.}, {2, 3})); test::ExpectClose(dx[1], test::AsTensor({6., 7., 8., 9., 10., 11.}, {2, 3})); } std::vector UnpackGrad(const Tensor& x, const Tensor& dy0, const Tensor& dy1, int axis) { auto T = DT_FLOAT; auto gdef = test::function::GDef( {f::NDef("x", "Placeholder", {}, {{"dtype", T}}), f::NDef("axis", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("dy0", "Placeholder", {}, {{"dtype", T}}), f::NDef("dy1", "Placeholder", {}, {{"dtype", T}}), f::NDef("dx", "SymbolicGradient", {"x", "dy0", "dy1"}, {{"f", FDH::FunctionRef("Unpack", {{"num", 2}, {"T", T}, {"axis", axis}})}, {"Tin", DataTypeSlice{T, T, T}}, {"Tout", DataTypeSlice{T}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run({{"x:0", x}, {"axis:0", test::AsScalar(axis)}, {"dy0:0", dy0}, {"dy1:0", dy1}}, {"dx:0"}, {}, &out)); CHECK_EQ(out.size(), 1); TF_CHECK_OK(sess->Close()); return out; } TEST(ArrayGradTest, UnpackGrad) { Tensor x(DT_FLOAT, {2, 2, 3}); x.flat().setZero(); Tensor dy0(DT_FLOAT, {2, 3}); Tensor dy1(DT_FLOAT, {2, 3}); test::FillIota(&dy0, 0); test::FillIota(&dy1, 100); auto dx = UnpackGrad(x, dy0, dy1, 0); test::ExpectClose(dx[0], test::AsTensor({0., 1., 2., 3., 4., 5., 100., 101., 102., 103., 104., 105.}, {2, 2, 3})); } std::vector ConcatGrad(int dim, const Tensor& x0, const Tensor& x1, const Tensor& dy) { auto T = DT_FLOAT; auto gdef = test::function::GDef( {f::NDef("dim", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("x0", "Placeholder", {}, {{"dtype", T}}), f::NDef("x1", "Placeholder", {}, {{"dtype", T}}), f::NDef("dy", "Placeholder", {}, {{"dtype", T}}), f::NDef("dx", "SymbolicGradient", {"dim", "x0", "x1", "dy"}, {{"f", FDH::FunctionRef("Concat", {{"N", 2}, {"T", T}})}, {"Tin", DataTypeSlice{DT_INT32, T, T, T}}, {"Tout", DataTypeSlice{DT_INT32, T, T}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run( {{"dim", test::AsScalar(dim)}, {"x0:0", x0}, {"x1:0", x1}, {"dy:0", dy}}, {"dx:0", "dx:1", "dx:2"}, {}, &out)); CHECK_EQ(out.size(), 3); TF_CHECK_OK(sess->Close()); return out; } std::vector ConcatGradV2(int dim, const Tensor& x0, const Tensor& x1, const Tensor& dy) { auto T = DT_FLOAT; auto gdef = test::function::GDef( {f::NDef("x0", "Placeholder", {}, {{"dtype", T}}), f::NDef("x1", "Placeholder", {}, {{"dtype", T}}), f::NDef("dim", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("dy", "Placeholder", {}, {{"dtype", T}}), f::NDef("dx", "SymbolicGradient", {"x0", "x1", "dim", "dy"}, {{"f", FDH::FunctionRef("ConcatV2", {{"N", 2}, {"T", T}})}, {"Tin", DataTypeSlice{T, T, DT_INT32, T}}, {"Tout", DataTypeSlice{T, T, DT_INT32}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run( {{"x0:0", x0}, {"x1:0", x1}, {"dim", test::AsScalar(dim)}, {"dy:0", dy}}, {"dx:0", "dx:1", "dx:2"}, {}, &out)); CHECK_EQ(out.size(), 3); TF_CHECK_OK(sess->Close()); return out; } TEST(ArrayGradTest, ConcatGrad) { Tensor x0(DT_FLOAT, {2, 3, 5}); x0.flat().setZero(); Tensor x1(DT_FLOAT, {2, 1, 5}); x1.flat().setZero(); Tensor dy(DT_FLOAT, {2, 4, 5}); test::FillIota(&dy, 0); // Test Concat. auto dx = ConcatGrad(1, x0, x1, dy); test::ExpectTensorEqual(dx[0], test::AsScalar(0)); test::ExpectClose( dx[1], test::AsTensor({0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34.}, {2, 3, 5})); test::ExpectClose(dx[2], test::AsTensor({15., 16., 17., 18., 19., 35., 36., 37., 38., 39.}, {2, 1, 5})); // Test ConcatV2 with positive concat axis. dx = ConcatGradV2(1, x0, x1, dy); test::ExpectTensorEqual(dx[dx.size() - 1], test::AsScalar(0)); test::ExpectClose( dx[0], test::AsTensor({0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34.}, {2, 3, 5})); test::ExpectClose(dx[1], test::AsTensor({15., 16., 17., 18., 19., 35., 36., 37., 38., 39.}, {2, 1, 5})); // Test ConcatV2 with negative concat axis. dx = ConcatGradV2(-2, x0, x1, dy); test::ExpectTensorEqual(dx[dx.size() - 1], test::AsScalar(0)); test::ExpectClose( dx[0], test::AsTensor({0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34.}, {2, 3, 5})); test::ExpectClose(dx[1], test::AsTensor({15., 16., 17., 18., 19., 35., 36., 37., 38., 39.}, {2, 1, 5})); } std::vector SplitGrad(int dim, const Tensor& x, const Tensor& dy0, const Tensor& dy1) { auto T = DT_FLOAT; auto gdef = test::function::GDef( {f::NDef("dim", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("x", "Placeholder", {}, {{"dtype", T}}), f::NDef("dy0", "Placeholder", {}, {{"dtype", T}}), f::NDef("dy1", "Placeholder", {}, {{"dtype", T}}), f::NDef("dx", "SymbolicGradient", {"dim", "x", "dy0", "dy1"}, {{"f", FDH::FunctionRef( "Split", {{"split_dim", dim}, {"num_split", 2}, {"T", T}})}, {"Tin", DataTypeSlice{DT_INT32, T, T, T}}, {"Tout", DataTypeSlice{DT_INT32, T}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run({{"dim", test::AsScalar(dim)}, {"x:0", x}, {"dy0:0", dy0}, {"dy1:0", dy1}}, {"dx:0", "dx:1"}, {}, &out)); CHECK_EQ(out.size(), 2); TF_CHECK_OK(sess->Close()); return out; } std::vector SplitVGrad(const Tensor& x, const Tensor& size_splits, int dim, const Tensor& dy0, const Tensor& dy1) { auto T = DT_FLOAT; auto Tlen = DT_INT64; auto gdef = test::function::GDef( {f::NDef("x", "Placeholder", {}, {{"dtype", T}}), f::NDef("size_splits", "Placeholder", {}, {{"dtype", Tlen}}), f::NDef("dim", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("dy0", "Placeholder", {}, {{"dtype", T}}), f::NDef("dy1", "Placeholder", {}, {{"dtype", T}}), f::NDef("dx", "SymbolicGradient", {"x", "size_splits", "dim", "dy0", "dy1"}, {{"f", FDH::FunctionRef("SplitV", {{"split_dim", dim}, {"num_split", 2}, {"T", T}, {"Tlen", Tlen}})}, {"Tin", DataTypeSlice{T, Tlen, DT_INT32, T, T}}, {"Tout", DataTypeSlice{T, Tlen, DT_INT32}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run({{"x:0", x}, {"size_splits:0", size_splits}, {"dim", test::AsScalar(dim)}, {"dy0:0", dy0}, {"dy1:0", dy1}}, {"dx:0", "dx:1", "dx:2"}, {}, &out)); CHECK_EQ(out.size(), 3); TF_CHECK_OK(sess->Close()); return out; } TEST(ArrayGradTest, SplitGrad) { Tensor x(DT_FLOAT, {2, 4, 5}); x.flat().setZero(); Tensor dy0(DT_FLOAT, {2, 2, 5}); Tensor dy1(DT_FLOAT, {2, 2, 5}); test::FillIota(&dy0, 0); test::FillIota(&dy1, 100); auto expected_dx = test::AsTensor( {0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 100., 101., 102., 103., 104., 105., 106., 107., 108., 109., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 110., 111., 112., 113., 114., 115., 116., 117., 118., 119.}, {2, 4, 5}); auto expected_d_dim = test::AsScalar(0); // SplitGrad { auto dx = SplitGrad(1, x, dy0, dy1); test::ExpectTensorEqual(dx[0], expected_d_dim); test::ExpectClose(dx[1], expected_dx); } // SplitVGrad { Tensor size_splits(DT_INT64, {2}); size_splits.flat().setConstant(2); auto expected_d_size_splits = test::AsTensor({0, 0}, {2}); auto dx = SplitVGrad(x, size_splits, 1, dy0, dy1); test::ExpectClose(dx[0], expected_dx); test::ExpectTensorEqual(dx[1], expected_d_size_splits); test::ExpectTensorEqual(dx[2], expected_d_dim); } } std::vector ReshapeGrad(const Tensor& x, const Tensor& s, const Tensor& dy) { auto T = DT_FLOAT; auto gdef = test::function::GDef( {f::NDef("x", "Placeholder", {}, {{"dtype", T}}), f::NDef("s", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("dy", "Placeholder", {}, {{"dtype", T}}), f::NDef("dx", "SymbolicGradient", {"x", "s", "dy"}, {{"f", FDH::FunctionRef("Reshape", {{"T", T}})}, {"Tin", DataTypeSlice{T, DT_INT32, T}}, {"Tout", DataTypeSlice{T, DT_INT32}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run({{"x:0", x}, {"s:0", s}, {"dy:0", dy}}, {"dx:0", "dx:1"}, {}, &out)); CHECK_EQ(out.size(), 2); TF_CHECK_OK(sess->Close()); return out; } TEST(ArrayGradTest, ReshapeGrad) { Tensor x(DT_FLOAT, {2, 4, 5}); x.flat().setZero(); auto s = test::AsTensor({8, 5}); Tensor dy(DT_FLOAT, {8, 5}); test::FillIota(&dy, 73); auto dx = ReshapeGrad(x, s, dy); test::ExpectClose( dx[0], test::AsTensor( {73., 74., 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., 100., 101., 102., 103., 104., 105., 106., 107., 108., 109., 110., 111., 112.}, {2, 4, 5})); test::ExpectTensorEqual(dx[1], test::AsTensor({0, 0})); } std::vector ExpandDimsGrad(const Tensor& x, const Tensor& s, const Tensor& dy) { auto T = DT_FLOAT; auto gdef = test::function::GDef( {f::NDef("x", "Placeholder", {}, {{"dtype", T}}), f::NDef("s", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("dy", "Placeholder", {}, {{"dtype", T}}), f::NDef("dx", "SymbolicGradient", {"x", "s", "dy"}, {{"f", FDH::FunctionRef("ExpandDims", {{"T", T}})}, {"Tin", DataTypeSlice{T, DT_INT32, T}}, {"Tout", DataTypeSlice{T, DT_INT32}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run({{"x:0", x}, {"s:0", s}, {"dy:0", dy}}, {"dx:0", "dx:1"}, {}, &out)); CHECK_EQ(out.size(), 2); TF_CHECK_OK(sess->Close()); return out; } TEST(ArrayGradTest, ExpandDimsGrad) { Tensor x(DT_FLOAT, {2, 4, 5}); x.flat().setZero(); auto s = test::AsTensor({1}); Tensor dy(DT_FLOAT, {2, 1, 4, 5}); test::FillIota(&dy, 73); auto dx = ExpandDimsGrad(x, s, dy); test::ExpectClose( dx[0], test::AsTensor( {73., 74., 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., 100., 101., 102., 103., 104., 105., 106., 107., 108., 109., 110., 111., 112.}, {2, 4, 5})); test::ExpectTensorEqual(dx[1], test::AsTensor({0})); } std::vector SqueezeGrad(const Tensor& x, const Tensor& dy) { auto T = DT_FLOAT; auto gdef = test::function::GDef( {f::NDef("x", "Placeholder", {}, {{"dtype", T}}), f::NDef("dy", "Placeholder", {}, {{"dtype", T}}), f::NDef("dx", "SymbolicGradient", {"x", "dy"}, {{"f", FDH::FunctionRef("Squeeze", {{"T", T}})}, {"Tin", DataTypeSlice{T, T}}, {"Tout", DataTypeSlice{T}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run({{"x:0", x}, {"dy:0", dy}}, {"dx:0"}, {}, &out)); CHECK_EQ(out.size(), 1); TF_CHECK_OK(sess->Close()); return out; } TEST(ArrayGradTest, SqueezeGrad) { Tensor x(DT_FLOAT, {2, 1, 3}); x.flat().setZero(); Tensor dy(DT_FLOAT, {2, 3}); test::FillIota(&dy, 1); auto dx = SqueezeGrad(x, dy); test::ExpectClose(dx[0], test::AsTensor({1., 2., 3., 4., 5., 6.}, {2, 1, 3})); } std::vector TransposeGrad(const Tensor& x, const Tensor& p, const Tensor& dy) { auto T = DT_FLOAT; auto gdef = test::function::GDef( {f::NDef("x", "Placeholder", {}, {{"dtype", T}}), f::NDef("p", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("dy", "Placeholder", {}, {{"dtype", T}}), f::NDef("dx", "SymbolicGradient", {"x", "p", "dy"}, {{"f", FDH::FunctionRef("Transpose", {{"T", T}})}, {"Tin", DataTypeSlice{T, DT_INT32, T}}, {"Tout", DataTypeSlice{T, DT_INT32}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run({{"x:0", x}, {"p:0", p}, {"dy:0", dy}}, {"dx:0", "dx:1"}, {}, &out)); CHECK_EQ(out.size(), 2); TF_CHECK_OK(sess->Close()); return out; } TEST(ArrayGradTest, TransposeGrad) { Tensor x(DT_FLOAT, {2, 4, 5}); x.flat().setZero(); auto p = test::AsTensor({2, 0, 1}); Tensor dy(DT_FLOAT, {5, 2, 4}); test::FillIota(&dy, 0); auto dx = TransposeGrad(x, p, dy); test::ExpectClose(dx[0], test::AsTensor( {0., 8., 16., 24., 32., 1., 9., 17., 25., 33., 2., 10., 18., 26., 34., 3., 11., 19., 27., 35., 4., 12., 20., 28., 36., 5., 13., 21., 29., 37., 6., 14., 22., 30., 38., 7., 15., 23., 31., 39.}, {2, 4, 5})); test::ExpectTensorEqual(dx[1], test::AsTensor({0, 0, 0})); } std::vector ReverseGrad(const Tensor& x, const Tensor& dims, const Tensor& dy) { auto T = DT_FLOAT; auto gdef = test::function::GDef( {f::NDef("x", "Placeholder", {}, {{"dtype", T}}), f::NDef("dims", "Placeholder", {}, {{"dtype", DT_BOOL}}), f::NDef("dy", "Placeholder", {}, {{"dtype", T}}), f::NDef("dx", "SymbolicGradient", {"x", "dims", "dy"}, {{"f", FDH::FunctionRef("Reverse", {{"T", T}})}, {"Tin", DataTypeSlice{T, DT_BOOL, T}}, {"Tout", DataTypeSlice{T, DT_BOOL}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run({{"x:0", x}, {"dims:0", dims}, {"dy:0", dy}}, {"dx:0", "dx:1"}, {}, &out)); CHECK_EQ(out.size(), 2); TF_CHECK_OK(sess->Close()); return out; } TEST(ArrayGradTest, ReverseGrad) { Tensor x(DT_FLOAT, {2, 3}); x.flat().setZero(); auto dims = test::AsTensor({false, true}); Tensor dy(DT_FLOAT, {2, 3}); test::FillIota(&dy, 1); auto dx = ReverseGrad(x, dims, dy); test::ExpectClose(dx[0], test::AsTensor({3., 2., 1., 6., 5., 4.}, {2, 3})); test::ExpectTensorEqual(dx[1], test::AsTensor({false, false})); } std::vector ReverseV2Grad(const Tensor& x, const Tensor& axis, const Tensor& dy) { auto T = DT_FLOAT; auto Tidx = DT_INT32; auto gdef = test::function::GDef( {f::NDef("x", "Placeholder", {}, {{"dtype", T}}), f::NDef("axis", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("dy", "Placeholder", {}, {{"dtype", T}}), f::NDef( "dx", "SymbolicGradient", {"x", "axis", "dy"}, {{"f", FDH::FunctionRef("ReverseV2", {{"T", T}, {"Tidx", Tidx}})}, {"Tin", DataTypeSlice{T, DT_INT32, T}}, {"Tout", DataTypeSlice{T, DT_INT32}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run({{"x:0", x}, {"axis:0", axis}, {"dy:0", dy}}, {"dx:0", "dx:1"}, {}, &out)); CHECK_EQ(out.size(), 2); TF_CHECK_OK(sess->Close()); return out; } TEST(ArrayGradTest, ReverseV2Grad) { Tensor x(DT_FLOAT, {2, 3}); x.flat().setZero(); auto axis = test::AsTensor({1}); Tensor dy(DT_FLOAT, {2, 3}); test::FillIota(&dy, 1); auto dx = ReverseV2Grad(x, axis, dy); test::ExpectTensorEqual( dx[0], test::AsTensor({3., 2., 1., 6., 5., 4.}, {2, 3})); test::ExpectTensorEqual(dx[1], test::AsTensor({0})); } std::vector SliceGrad(const Tensor& x, const Tensor& b, const Tensor& s, const Tensor& dy) { auto T = DT_FLOAT; auto gdef = test::function::GDef( {f::NDef("x", "Placeholder", {}, {{"dtype", T}}), f::NDef("b", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("s", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("dy", "Placeholder", {}, {{"dtype", T}}), f::NDef( "dx", "SymbolicGradient", {"x", "b", "s", "dy"}, {{"f", FDH::FunctionRef("Slice", {{"T", T}, {"Index", DT_INT32}})}, {"Tin", DataTypeSlice{T, DT_INT32, DT_INT32, T}}, {"Tout", DataTypeSlice{T, DT_INT32, DT_INT32}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run({{"x:0", x}, {"b:0", b}, {"s:0", s}, {"dy:0", dy}}, {"dx:0", "dx:1", "dx:2"}, {}, &out)); CHECK_EQ(out.size(), 3); TF_CHECK_OK(sess->Close()); return out; } TEST(ArrayGradTest, SliceGrad) { Tensor x(DT_FLOAT, {2, 3, 4}); x.flat().setZero(); auto begin = test::AsTensor({1, 1, 1}); auto size = test::AsTensor({1, 2, 2}); Tensor dy(DT_FLOAT, {1, 2, 2}); test::FillIota(&dy, 1); auto dx = SliceGrad(x, begin, size, dy); test::ExpectClose(dx[0], test::AsTensor( { 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 2., 0., 0., 3., 4., 0., }, {2, 3, 4})); test::ExpectTensorEqual(dx[1], test::AsTensor({0, 0, 0})); test::ExpectTensorEqual(dx[2], test::AsTensor({0, 0, 0})); } std::vector StridedSliceGrad(const Tensor& x, const Tensor& begin, const Tensor& end, const Tensor& strides, const Tensor& dy, int32 begin_mask, int32 end_mask, int32 ellipsis_mask, int32 new_axis_mask, int32 shrink_axis_mask) { auto T = DT_FLOAT; auto gdef = test::function::GDef( {f::NDef("x", "Placeholder", {}, {{"dtype", T}}), f::NDef("begin", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("end", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("strides", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("dy", "Placeholder", {}, {{"dtype", T}}), f::NDef( "dx", "SymbolicGradient", {"x", "begin", "end", "strides", "dy"}, {{"f", FDH::FunctionRef("StridedSlice", { {"T", T}, {"Index", DT_INT32}, {"begin_mask", begin_mask}, {"end_mask", end_mask}, {"new_axis_mask", new_axis_mask}, {"shrink_axis_mask", shrink_axis_mask}, {"ellipsis_mask", ellipsis_mask}, })}, {"Tin", DataTypeSlice{T, DT_INT32, DT_INT32, DT_INT32, T}}, {"Tout", DataTypeSlice{T, DT_INT32, DT_INT32, DT_INT32}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run({{"x:0", x}, {"begin:0", begin}, {"end:0", end}, {"strides:0", strides}, {"dy:0", dy}}, {"dx:0", "dx:1", "dx:2", "dx:3"}, {}, &out)); CHECK_EQ(out.size(), 4); TF_CHECK_OK(sess->Close()); return out; } std::vector StridedSliceGradGrad( const Tensor& shape, const Tensor& begin, const Tensor& end, const Tensor& strides, const Tensor& dy, const Tensor& grad, int32 begin_mask, int32 end_mask, int32 ellipsis_mask, int32 new_axis_mask, int32 shrink_axis_mask) { auto T = DT_FLOAT; auto gdef = test::function::GDef( {f::NDef("shape", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("begin", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("end", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("strides", "Placeholder", {}, {{"dtype", DT_INT32}}), f::NDef("dy", "Placeholder", {}, {{"dtype", T}}), f::NDef("grad", "Placeholder", {}, {{"dtype", T}}), f::NDef( "dx", "SymbolicGradient", {"shape", "begin", "end", "strides", "dy", "grad"}, {{"f", FDH::FunctionRef("StridedSliceGrad", { {"T", T}, {"Index", DT_INT32}, {"begin_mask", begin_mask}, {"end_mask", end_mask}, {"new_axis_mask", new_axis_mask}, {"shrink_axis_mask", shrink_axis_mask}, {"ellipsis_mask", ellipsis_mask}, })}, {"Tin", DataTypeSlice{DT_INT32, DT_INT32, DT_INT32, DT_INT32, T, T}}, {"Tout", DataTypeSlice{DT_INT32, DT_INT32, DT_INT32, DT_INT32, T}}})}); VLOG(1) << DebugStringWhole(gdef); auto sess = NewSession(); TF_CHECK_OK(sess->Create(gdef)); std::vector out; TF_CHECK_OK(sess->Run({{"shape:0", shape}, {"begin:0", begin}, {"end:0", end}, {"strides:0", strides}, {"dy:0", dy}, {"grad:0", grad}}, {"dx:0", "dx:1", "dx:2", "dx:3", "dx:4"}, {}, &out)); CHECK_EQ(out.size(), 5); TF_CHECK_OK(sess->Close()); return out; } TEST(ArrayGradTest, StridedSliceGrad) { Tensor x(DT_FLOAT, {2, 3, 4}); x.flat().setZero(); Tensor x_shape = test::AsTensor({2, 3, 4}, {3}); { auto start = test::AsTensor({1, 1, 1}); auto stop = test::AsTensor({2, 3, 3}); auto strides = test::AsTensor({1, 1, 1}); Tensor dy(DT_FLOAT, {1, 2, 2}); test::FillIota(&dy, 1); int begin_mask = 0, end_mask = 0, new_axis_mask = 0, shrink_axis_mask = 0, ellipsis_mask = 0; auto dx = StridedSliceGrad(x, start, stop, strides, dy, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask); test::ExpectClose(dx[0], test::AsTensor( { 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 2., 0., 0., 3., 4., 0., }, {2, 3, 4})); test::ExpectTensorEqual(dx[1], test::AsTensor({0, 0, 0})); test::ExpectTensorEqual(dx[2], test::AsTensor({0, 0, 0})); auto ddx = StridedSliceGradGrad(x_shape, start, stop, strides, dy, dx[0], begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask); test::ExpectClose(ddx[4], dy); } // test equivalent of python tf.gradients(foo[1:2, 1:3, 1:3]) { auto start = test::AsTensor({1, 1, 1}); auto stop = test::AsTensor({2, 3, 3}); auto strides = test::AsTensor({1, 1, 1}); Tensor dy(DT_FLOAT, {1, 2, 2}); test::FillIota(&dy, 1); int begin_mask = 0, end_mask = 0, new_axis_mask = 0, shrink_axis_mask = 0, ellipsis_mask = 0; auto dx = StridedSliceGrad(x, start, stop, strides, dy, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask); test::ExpectClose(dx[0], test::AsTensor( { 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 2., 0., 0., 3., 4., 0., }, {2, 3, 4})); test::ExpectTensorEqual(dx[1], test::AsTensor({0, 0, 0})); test::ExpectTensorEqual(dx[2], test::AsTensor({0, 0, 0})); auto ddx = StridedSliceGradGrad(x_shape, start, stop, strides, dy, dx[0], begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask); test::ExpectClose(ddx[4], dy); } // test equivalent of python tf.gradients(foo[1, 1:, :-2, None]) { int dontcare = 66; auto start = test::AsTensor({1, 1, dontcare, dontcare}); auto stop = test::AsTensor({2, dontcare, -2, dontcare}); auto strides = test::AsTensor({1, 1, 1, dontcare}); Tensor dy(DT_FLOAT, {2, 2, 1}); test::FillIota(&dy, 1); int begin_mask = 4, end_mask = 2, new_axis_mask = 8, shrink_axis_mask = 1, ellipsis_mask = 0; auto dx = StridedSliceGrad(x, start, stop, strides, dy, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask); test::ExpectClose(dx[0], test::AsTensor( { 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 2., 0., 0., 3., 4., 0., 0., }, {2, 3, 4})); test::ExpectTensorEqual(dx[1], test::AsTensor({0, 0, 0, 0})); test::ExpectTensorEqual(dx[2], test::AsTensor({0, 0, 0, 0})); auto ddx = StridedSliceGradGrad(x_shape, start, stop, strides, dy, dx[0], begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask); test::ExpectClose(ddx[4], dy); } // test equivalent of tf.gradients(foo[1, ...]) i.e. foo[1, 0:3, 0:4] { int dontcare = 66; auto start = test::AsTensor({1, dontcare}); auto stop = test::AsTensor({2, dontcare}); auto strides = test::AsTensor({1, 1}); Tensor dy(DT_FLOAT, {3, 4}); test::FillIota(&dy, 1); int begin_mask = 0, end_mask = 0, new_axis_mask = 0, shrink_axis_mask = 1, ellipsis_mask = 2; auto dx = StridedSliceGrad(x, start, stop, strides, dy, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask); test::ExpectClose(dx[0], test::AsTensor( { 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., }, {2, 3, 4})); test::ExpectTensorEqual(dx[1], test::AsTensor({0, 0})); test::ExpectTensorEqual(dx[2], test::AsTensor({0, 0})); auto ddx = StridedSliceGradGrad(x_shape, start, stop, strides, dy, dx[0], begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask); test::ExpectClose(ddx[4], dy); } } } // namespace } // namespace tensorflow