author     A. Unique TensorFlower <gardener@tensorflow.org>   2017-05-01 16:14:06 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>    2017-05-01 17:31:34 -0700
commit     aebaf317ce50ef42ebaab91191224c872389a6f6 (patch)
tree       de9dc459f797e990a057b6aba5f9c76634937abe /tensorflow/core/common_runtime/function_test.cc
parent     0135602ffd33f21d702ee7a3a55e86cd85af45ca (diff)
This CL removes the Graph.edge_set_ field, which stored the set of Edge* belonging to a Graph. Graph already stores this information in Graph.edges_, so there is no good reason to keep both collections. To convert everything to use Graph.edges_ instead of Graph.edge_set_, I defined a class that excludes nullptr entries when iterating over the edges_ vector.
This caused changes to non-contractual behavior of the runtime (enumeration order), so the unit tests are updated to reflect this.
On a real-world graph used by our team, which contains 13190 nodes and 20796 edges, this change reduced heap allocation from 39.1 MB to 38.0 MB, for a drop of about 3%.
Change: 154781831
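The nullptr-skipping iteration described above can be illustrated with a small range wrapper. The sketch below is illustrative only: the class and member names (SkipNullEdges, Edge with an id field) are invented here for the example and are not the types added by this CL. It simply shows the idea of ranging over a dense vector<Edge*> while skipping null slots left by removed edges, instead of maintaining a separate edge set.

// Illustrative sketch only: a range wrapper over std::vector<Edge*> whose
// iterator skips nullptr slots (removed edges), so callers can iterate the
// dense edges_ vector directly instead of keeping a parallel edge set.
#include <cstddef>
#include <iostream>
#include <vector>

struct Edge { int id; };  // stand-in for the real Edge type

class SkipNullEdges {
 public:
  explicit SkipNullEdges(const std::vector<Edge*>& edges) : edges_(edges) {}

  class const_iterator {
   public:
    const_iterator(const std::vector<Edge*>* edges, size_t pos)
        : edges_(edges), pos_(pos) {
      SkipNulls();  // land on the first non-null slot
    }
    const Edge* operator*() const { return (*edges_)[pos_]; }
    const_iterator& operator++() {
      ++pos_;
      SkipNulls();
      return *this;
    }
    bool operator!=(const const_iterator& other) const {
      return pos_ != other.pos_;
    }

   private:
    void SkipNulls() {
      while (pos_ < edges_->size() && (*edges_)[pos_] == nullptr) ++pos_;
    }
    const std::vector<Edge*>* edges_;
    size_t pos_;
  };

  const_iterator begin() const { return const_iterator(&edges_, 0); }
  const_iterator end() const { return const_iterator(&edges_, edges_.size()); }

 private:
  const std::vector<Edge*>& edges_;
};

int main() {
  Edge a{1}, c{3};
  std::vector<Edge*> edges = {&a, nullptr, &c};  // slot 1 held a removed edge
  for (const Edge* e : SkipNullEdges(edges)) {
    std::cout << e->id << "\n";  // prints 1 then 3; the null slot is skipped
  }
}

Because iteration now walks the underlying vector in index order rather than a hash set, enumeration order changes, which is why the expected strings in the tests below are reordered.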
Diffstat (limited to 'tensorflow/core/common_runtime/function_test.cc')
-rw-r--r--  tensorflow/core/common_runtime/function_test.cc  14
1 file changed, 7 insertions, 7 deletions
diff --git a/tensorflow/core/common_runtime/function_test.cc b/tensorflow/core/common_runtime/function_test.cc
index bbf35590eb..8f70ab8783 100644
--- a/tensorflow/core/common_runtime/function_test.cc
+++ b/tensorflow/core/common_runtime/function_test.cc
@@ -424,7 +424,7 @@ TEST_F(FunctionLibraryRuntimeTest, ControlDeps) {
   n8 = NoOp() @ n4
   n9 = Identity[T=float](n3) @ n8
   n10 = Identity[T=float](n2) @ n8
-  n11 = NoOp() @ n10, n9
+  n11 = NoOp() @ n9, n10
   n5 = Mul[T=float](n2, n2) @ n11
   n6 = Add[T=float](n4, n5)
 }
@@ -500,8 +500,8 @@ TEST_F(FunctionLibraryRuntimeTest, Gradient_XTimesTwo) {
   OptimizeGraph(lib_, &g);
   const char* e2 = R"P(
 (n2:float, n3:float) -> (n9:float) {
-  n11 = Const[dtype=int32, value=Tensor<type: int32 shape: [0] values: >]()
   n10 = Const[dtype=float, value=Tensor<type: float shape: [] values: 2>]()
+  n11 = Const[dtype=int32, value=Tensor<type: int32 shape: [0] values: >]()
   n6 = Shape[T=float, out_type=int32](n2)
   n5 = Mul[T=float](n3, n10)
   n7 = BroadcastGradientArgs[T=int32](n6, n11)
@@ -614,10 +614,10 @@ TEST_F(FunctionLibraryRuntimeTest, Gradient_AddSum) {
   n17 = Sum[T=float, Tidx=int32, keep_dims=false](n14, n16)
   n19 = SymbolicGradient[Tin={float, int32, float}, Tout={float, int32}, f=Sum[T=float, Tidx=int32, keep_dims=false]](n14, n16, n26)
   n21 = SymbolicGradient[Tin={float, float, float}, Tout={float, float}, f=Add[T=float]](n24, n25, n19)
-  n28 = Identity[T=float](n21:1)
   n27 = Identity[T=float](n21)
-  n6 = Identity[T=float](n28)
+  n28 = Identity[T=float](n21:1)
   n8 = Identity[T=float](n27)
+  n6 = Identity[T=float](n28)
 }
 )P";
   EXPECT_EQ(e1, DebugString(g.get()));
@@ -626,8 +626,8 @@ TEST_F(FunctionLibraryRuntimeTest, Gradient_AddSum) {
   const char* e2 = R"P(
 (n4:float, n3:float) -> (n25:float, n23:float) {
   n2 = Const[dtype=float, value=Tensor<type: float shape: [] values: 1>]()
-  n8 = Const[dtype=int32, value=Tensor<type: int32 shape: [] values: 0>]()
   n7 = Const[dtype=int32, value=Tensor<type: int32 shape: [] values: 1>]()
+  n8 = Const[dtype=int32, value=Tensor<type: int32 shape: [] values: 0>]()
   n19 = Shape[T=float, out_type=int32](n3)
   n9 = Add[T=float](n4, n3)
   n20 = Shape[T=float, out_type=int32](n4)
@@ -641,10 +641,10 @@ TEST_F(FunctionLibraryRuntimeTest, Gradient_AddSum) {
   n16 = Reshape[T=float, Tshape=int32](n2, n15)
   n17 = Div[T=int32](n14, n15)
   n18 = Tile[T=float, Tmultiples=int32](n16, n17)
-  n24 = Sum[T=float, Tidx=int32, keep_dims=false](n18, n21)
   n22 = Sum[T=float, Tidx=int32, keep_dims=false](n18, n21:1)
-  n25 = Reshape[T=float, Tshape=int32](n24, n20)
+  n24 = Sum[T=float, Tidx=int32, keep_dims=false](n18, n21)
   n23 = Reshape[T=float, Tshape=int32](n22, n19)
+  n25 = Reshape[T=float, Tshape=int32](n24, n20)
 }
 )P";
   EXPECT_EQ(e2, DebugString(g.get()));