From 4ec29c5d95ef3b63a756b7a8263892c2fb69cfc5 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower" <gardener@tensorflow.org>
Date: Mon, 31 Jul 2017 09:41:25 -0700
Subject: Avoid direct access to Node::def() where some other method works.

PiperOrigin-RevId: 163704839
---
 tensorflow/core/common_runtime/function.cc          |  4 +++-
 tensorflow/core/common_runtime/simple_placer.cc     |  8 ++++----
 tensorflow/core/graph/gradients.cc                  |  4 +++-
 tensorflow/core/graph/optimizer_cse.cc              |  5 +++--
 tensorflow/core/kernels/hexagon/graph_transferer.cc | 12 ++++++------
 5 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/tensorflow/core/common_runtime/function.cc b/tensorflow/core/common_runtime/function.cc
index 395a64353f..64c3747ce1 100644
--- a/tensorflow/core/common_runtime/function.cc
+++ b/tensorflow/core/common_runtime/function.cc
@@ -1030,7 +1030,9 @@ void ToGraphDef(const Graph* g, GraphDef* gdef, bool pretty) {
     NodeDef* ndef = gdef->add_node();
     ndef->set_name(NewName(n, pretty));
     ndef->set_op(n->type_string());
-    *(ndef->mutable_attr()) = n->def().attr();
+    for (const auto& attr : n->attrs()) {
+      (*ndef->mutable_attr())[attr.first] = attr.second;
+    }
     inputs.clear();
     inputs.resize(n->num_inputs());
     for (const Edge* e : n->in_edges()) {
diff --git a/tensorflow/core/common_runtime/simple_placer.cc b/tensorflow/core/common_runtime/simple_placer.cc
index 6b7c47f8fe..5e6c3d164b 100644
--- a/tensorflow/core/common_runtime/simple_placer.cc
+++ b/tensorflow/core/common_runtime/simple_placer.cc
@@ -147,7 +147,7 @@ class ColocationGraph {
     // attribute with the calls to ColocateNodeToGroup.
     bool found_spec = false;
     const AttrValue* attr_value =
-        AttrSlice(node->def()).Find(kColocationAttrNameStringPiece);
+        node->attrs().Find(kColocationAttrNameStringPiece);
     if (attr_value != nullptr && attr_value->has_list()) {
       for (const string& class_spec : attr_value->list().s()) {
         StringPiece spec(class_spec);
@@ -184,7 +184,7 @@ class ColocationGraph {
       // error, return it.
       Status s = ColocateNodes(*node, *root_node);
       if (!s.ok()) {
-        return AttachDef(s, node->def());
+        return AttachDef(s, *node);
       }
     }
     return Status::OK();
@@ -418,7 +418,7 @@ class ColocationGraph {
       }
       Status status = InitializeMember(*node, &members_[node->id()]);
       if (!status.ok()) {
-        return AttachDef(status, node->def());
+        return AttachDef(status, *node);
      }
    }
    return Status::OK();
@@ -727,7 +727,7 @@ Status SimplePlacer::Run() {
                 "be on the same device), but the two nodes "
                 "were assigned two different devices: ",
                 status.error_message()),
-            dst->def());
+            *dst);
       }
     }
   }
diff --git a/tensorflow/core/graph/gradients.cc b/tensorflow/core/graph/gradients.cc
index d3e7ff781c..6b56613470 100644
--- a/tensorflow/core/graph/gradients.cc
+++ b/tensorflow/core/graph/gradients.cc
@@ -110,7 +110,9 @@ static Node* AddSymGrad(Graph* g, Node* n, gtl::ArraySlice<NodeOut> grads) {
   AddNodeAttr("Tout", n->input_types(), &ndef);
   NameAttrList func;
   func.set_name(n->type_string());
-  *(func.mutable_attr()) = n->def().attr();
+  for (const auto& attr : n->attrs()) {
+    (*func.mutable_attr())[attr.first] = attr.second;
+  }
   AddNodeAttr("f", func, &ndef);
   Status s;
   Node* ret = g->AddNode(ndef, &s);
diff --git a/tensorflow/core/graph/optimizer_cse.cc b/tensorflow/core/graph/optimizer_cse.cc
index 47337ce8a2..6b452a1d5d 100644
--- a/tensorflow/core/graph/optimizer_cse.cc
+++ b/tensorflow/core/graph/optimizer_cse.cc
@@ -189,8 +189,9 @@ bool OptimizerCSE::Optimize(
     if (!n->IsOp()) continue;

     // Don't prune placeholder nodes.
-    if (n->def().op() == "Placeholder" || n->def().op() == "PlaceholderV2" ||
-        n->def().op() == "PlaceholderWithDefault") {
+    if (n->type_string() == "Placeholder" ||
+        n->type_string() == "PlaceholderV2" ||
+        n->type_string() == "PlaceholderWithDefault") {
       continue;
     }

diff --git a/tensorflow/core/kernels/hexagon/graph_transferer.cc b/tensorflow/core/kernels/hexagon/graph_transferer.cc
index 7768acc771..901a41aec4 100644
--- a/tensorflow/core/kernels/hexagon/graph_transferer.cc
+++ b/tensorflow/core/kernels/hexagon/graph_transferer.cc
@@ -376,7 +376,7 @@ Status GraphTransferer::TransformGraphToAddAggregatedInputNode(
   std::vector<DataType> data_types;
   std::vector<TensorShape> shapes;
   Status status = RemoteFusedGraphExecuteUtils::GetOutputTensorShapeType(
-      original_input_node->def(), &data_types, &shapes);
+      original_input_node->attrs(), &data_types, &shapes);
   if (status.ok()) {
     created_node->AddAttr(
         RemoteFusedGraphExecuteUtils::ATTR_OUTPUT_DATA_TYPES, data_types);
@@ -579,7 +579,7 @@ bool GraphTransferer::HasPaddingAndStrides(const Node& node) {
 }

 bool GraphTransferer::NeedsToAddRank(const Node& node) {
-  const string& op_type = node.def().op();
+  const StringPiece op_type(node.type_string());
   if (op_type == "Transpose" || op_type == "ExpandDims") {
     return true;
   }
@@ -587,7 +587,7 @@ bool GraphTransferer::NeedsToAddRank(const Node& node) {
 }

 bool GraphTransferer::IsPadNode(const Node& node) {
-  const string& op_type = node.def().op();
+  const StringPiece op_type(node.type_string());
   if (op_type == "Pad") {
     return true;
   }
@@ -678,7 +678,7 @@ void GraphTransferer::RegisterNodeWithRank(
   CHECK_NOTNULL(input0_node);
   std::vector<TensorShape> shapes;
   Status status = RemoteFusedGraphExecuteUtils::GetOutputTensorShapeType(
-      input0_node->def(), nullptr, &shapes);
+      input0_node->attrs(), nullptr, &shapes);
   CHECK_EQ(1, shapes.size()) << "Output size should be 1.";
   const int const_val_id =
       RegisterConstScalar(DT_INT32, shapes.at(0).dims(), id, node.num_inputs());
@@ -728,7 +728,7 @@ void GraphTransferer::RegisterPadNode(

   CHECK(input_node->IsConstant());
   const TensorProto* tensor_proto = nullptr;
-  TF_CHECK_OK(GetNodeAttr(input_node->def(), "value", &tensor_proto));
+  TF_CHECK_OK(GetNodeAttr(input_node->attrs(), "value", &tensor_proto));
   CHECK_NOTNULL(tensor_proto);
   Tensor const_tensor;
   TF_CHECK_OK(MakeTensorFromProto(*tensor_proto, &const_tensor));
@@ -739,7 +739,7 @@ void GraphTransferer::RegisterPadNode(
   } else if (const_tensor.shape().dim_size(0) < PAD_WIDTH) {
     const int width = const_tensor.shape().dim_size(0);
     const TensorProto* proto = nullptr;
-    TF_CHECK_OK(GetNodeAttr(input_node->def(), "value", &proto));
+    TF_CHECK_OK(GetNodeAttr(input_node->attrs(), "value", &proto));
     Tensor const_tensor;
     TF_CHECK_OK(MakeTensorFromProto(*proto, &const_tensor));
     CHECK_EQ(DT_INT32, const_tensor.dtype());
--
cgit v1.2.3
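
The change is mechanical and follows the same pattern at every call site: metadata that was previously read through the stored NodeDef (`n->def().op()`, `n->def().attr()`, `AttrSlice(n->def())`) is now read through Node's own accessors (`n->type_string()`, `n->attrs()`), and error paths pass the Node itself to `AttachDef`. Below is a minimal before/after sketch, assuming the usual TensorFlow headers; `InspectNode` is a hypothetical helper used only for illustration, and only APIs that appear in the diff above are used:

    #include "tensorflow/core/framework/attr_value.pb.h"
    #include "tensorflow/core/framework/node_def_util.h"
    #include "tensorflow/core/graph/graph.h"
    #include "tensorflow/core/platform/logging.h"

    namespace tensorflow {

    // Hypothetical helper, for illustration only.
    void InspectNode(const Node* n) {
      // Before this patch, call sites reached through the stored NodeDef:
      //   const string& op = n->def().op();
      //   const AttrValue* v = AttrSlice(n->def()).Find("_class");
      // After, they ask the Node directly:
      const string& op = n->type_string();             // op type name
      const AttrValue* v = n->attrs().Find("_class");  // attribute lookup
      if (v != nullptr && v->has_list()) {
        LOG(INFO) << op << " carries a colocation constraint.";
      }
    }

    }  // namespace tensorflow

Routing all reads through the accessors presumably decouples callers from how Node stores its definition, so the NodeDef representation can change without touching every call site.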