path: root/tensorflow/cc/framework
author     Skye Wanderman-Milne <skyewm@google.com>  2017-02-13 21:39:15 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>  2017-02-13 21:46:32 -0800
commit     07deeacf33bd60b776ad35394f5767c42feef138 (patch)
tree       1ff485f344132f8668ea6bb37441519b81c87d98 /tensorflow/cc/framework
parent     d85616e623028464027cf67683be696ec666a79c (diff)
C++ API: include optional attrs in generated class comments + improve core docs
Old:

/// Copy Op.
///
/// Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
/// device on which the tensor is allocated.
///
/// Unlike the CopyHost Op, this op does not have HostMemory constraint on its
/// input or output.
///
/// Arguments:
/// * scope: A Scope object
/// * input: Input tensor.
///
/// Returns:
/// * `Output`: Output tensor, deep-copied from input.
class Copy {...}

New:

/// Copy Op.
///
/// Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
/// device on which the tensor is allocated.
///
/// Unlike the CopyHost Op, this op does not have HostMemory constraint on its
/// input or output.
///
/// Arguments:
/// * scope: A Scope object
/// * input: Input tensor.
///
/// Optional attributes (see `Attrs`):
/// * tensor_name: The name of the input tensor.
///
/// Returns:
/// * `Output`: Output tensor, deep-copied from input.
class Copy {...}

Change: 147432712
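For reference, the optional attribute documented above is supplied through the generated Attrs struct rather than as a positional argument. Below is a minimal sketch of what that looks like from a caller's side; the Copy wrapper, the Copy::TensorName setter, and the ClientSession usage are assumptions for illustration only, since the exact generated names and available headers depend on which op libraries are built into the C++ API.

#include <vector>

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/standard_ops.h"

int main() {
  using namespace tensorflow;
  using namespace tensorflow::ops;

  Scope root = Scope::NewRootScope();
  auto input = Const(root, {{1.0f, 2.0f}, {3.0f, 4.0f}});

  // Required arguments (scope, input) stay positional; the optional
  // tensor_name attribute goes through the generated Attrs struct.
  auto copied = Copy(root, input, Copy::TensorName("copied_input"));

  ClientSession session(root);
  std::vector<Tensor> outputs;
  TF_CHECK_OK(session.Run({copied}, &outputs));
  return 0;
}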
Diffstat (limited to 'tensorflow/cc/framework')
-rw-r--r--  tensorflow/cc/framework/cc_op_gen.cc  40
-rw-r--r--  tensorflow/cc/framework/ops.h  3
2 files changed, 31 insertions, 12 deletions
diff --git a/tensorflow/cc/framework/cc_op_gen.cc b/tensorflow/cc/framework/cc_op_gen.cc
index 4f32be0e40..22cd7fb0d4 100644
--- a/tensorflow/cc/framework/cc_op_gen.cc
+++ b/tensorflow/cc/framework/cc_op_gen.cc
@@ -437,6 +437,7 @@ OpInfo::OpInfo(const OpDef& g_op_def, const OpDef& i_op_def,
}
strings::StrAppend(&comment, "\nArguments:\n* scope: A Scope object\n");
+ // Process inputs
for (int i = 0; i < op_def.input_arg_size(); ++i) {
const auto& arg(op_def.input_arg(i));
arg_types.push_back(strings::StrCat(
@@ -451,30 +452,45 @@ OpInfo::OpInfo(const OpDef& g_op_def, const OpDef& i_op_def,
arg.description(), "\n");
}
}
+
+ // Process attrs
+ string required_attrs_comment;
+ string optional_attrs_comment;
for (int i = 0; i < op_def.attr_size(); ++i) {
const auto& attr(op_def.attr(i));
- // If the attr is going to be inferred or is optional, don't add it as a
- // required argument.
- if ((inferred_input_attrs.find(attr.name()) !=
- inferred_input_attrs.end()) ||
- attr.has_default_value()) {
- continue;
- }
+ // Skip inferred arguments
+ if (inferred_input_attrs.count(attr.name()) > 0) continue;
+
const auto entry = AttrTypeName(attr.type());
const auto attr_type_name = entry.first;
const bool use_const = entry.second;
+ string attr_name = AvoidCPPKeywords(attr.name());
- arg_types.push_back(strings::StrCat(use_const ? "const " : "",
- attr_type_name, use_const ? "&" : ""));
- arg_names.push_back(AvoidCPPKeywords(attr.name()));
+ string attr_comment;
if (!attr.description().empty()) {
- strings::StrAppend(&comment, "* ", AvoidCPPKeywords(attr.name()), ":\n");
// TODO(keveman): Word wrap and indent this, to handle multi-line
// descriptions.
- strings::StrAppend(&comment, " ", attr.description(), "\n");
+ strings::StrAppend(&attr_comment, "* ", attr_name, ": ",
+ attr.description(), "\n");
+ }
+ if (attr.has_default_value()) {
+ strings::StrAppend(&optional_attrs_comment, attr_comment);
+ } else {
+ strings::StrAppend(&required_attrs_comment, attr_comment);
+ arg_types.push_back(strings::StrCat(
+ use_const ? "const " : "", attr_type_name, use_const ? "&" : ""));
+ arg_names.push_back(attr_name);
}
}
+ strings::StrAppend(&comment, required_attrs_comment);
+
+ if (!optional_attrs_comment.empty()) {
+ strings::StrAppend(&comment, "\nOptional attributes (see `Attrs`):\n");
+ strings::StrAppend(&comment, optional_attrs_comment);
+ }
+
+ // Process outputs
for (int i = 0; i < op_def.output_arg_size(); ++i) {
const auto& arg = op_def.output_arg(i);
bool is_list = ArgIsList(arg);
diff --git a/tensorflow/cc/framework/ops.h b/tensorflow/cc/framework/ops.h
index c47d30ec3c..889d5db31d 100644
--- a/tensorflow/cc/framework/ops.h
+++ b/tensorflow/cc/framework/ops.h
@@ -85,6 +85,7 @@ class Output {
int64 index_ = 0;
};
+/// Hash class that can be used for e.g. storing Outputs in an unordered_map
struct OutputHash {
std::size_t operator()(const Output& output) const {
return Hash64Combine(std::hash<Node*>()(output.node()),
@@ -166,6 +167,7 @@ class Input {
/// initializer list is indeed a valid multi-dimensional tensor.
Initializer(const std::initializer_list<Initializer>& v);
+ // START_SKIP_DOXYGEN
template <typename T, bool = std::is_convertible<T, string>::value>
struct RealType {
typedef string type;
@@ -175,6 +177,7 @@ class Input {
struct RealType<T, false> {
typedef T type;
};
+ // END_SKIP_DOXYGEN
TensorProto AsTensorProto() {
TensorProto tensor_proto;
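Aside: the new comment on OutputHash in ops.h describes using it as the hash functor for an unordered_map keyed on Output. A minimal sketch of that usage follows; the OutputEq equality functor is hypothetical and written against the public node()/index() accessors, supplied here so the example does not rely on any particular operator== being defined for Output.

#include <string>
#include <unordered_map>

#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/standard_ops.h"

// Hypothetical equality functor matching OutputHash: two Outputs denote the
// same endpoint when they share a node and an output index.
struct OutputEq {
  bool operator()(const tensorflow::Output& a,
                  const tensorflow::Output& b) const {
    return a.node() == b.node() && a.index() == b.index();
  }
};

int main() {
  tensorflow::Scope root = tensorflow::Scope::NewRootScope();
  tensorflow::Output a = tensorflow::ops::Const(root, 1.0f);
  tensorflow::Output b = tensorflow::ops::Const(root, 2.0f);

  // OutputHash combines the node pointer with the output index, so distinct
  // endpoints of the same node land in different buckets.
  std::unordered_map<tensorflow::Output, std::string, tensorflow::OutputHash,
                     OutputEq>
      labels;
  labels[a] = "first constant";
  labels[b] = "second constant";
  return 0;
}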