author    Anna R <annarev@google.com>  2017-10-11 00:22:33 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2017-10-11 00:26:35 -0700
commit    0ed44c0144c9dfae8a53dd3b4f943f23c5a57e37 (patch)
tree      7811d9848437548041a2b106de4010ab357a40a9
parent    ff8f26d5968f01016428e1755adf514362bf880b (diff)
TensorFlow base ApiDefs and tests to make sure they are kept in sync.
PiperOrigin-RevId: 171788007
-rw-r--r--  tensorflow/core/BUILD                             |   30
-rw-r--r--  tensorflow/core/api_def/api_test.cc               |  206
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_A.pbtxt  |  670
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_B.pbtxt  |  448
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_C.pbtxt  |  513
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_D.pbtxt  |  790
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_E.pbtxt  |  261
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_F.pbtxt  |  411
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_G.pbtxt  |  257
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_H.pbtxt  |   52
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_I.pbtxt  |  518
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_L.pbtxt  |  392
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_M.pbtxt  |  749
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_N.pbtxt  |   94
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_O.pbtxt  |  195
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_P.pbtxt  |  431
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Q.pbtxt  |  609
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_R.pbtxt  | 1392
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_S.pbtxt  | 2678
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_T.pbtxt  |  619
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_U.pbtxt  |  150
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_V.pbtxt  |   19
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_W.pbtxt  |   72
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Z.pbtxt  |   27
-rwxr-xr-x  tensorflow/core/api_def/update_api_def.sh         |   28
-rw-r--r--  tensorflow/core/framework/op.h                    |    3
26 files changed, 11613 insertions(+), 1 deletion(-)
diff --git a/tensorflow/core/BUILD b/tensorflow/core/BUILD
index f3e43dd552..74aecbc1f2 100644
--- a/tensorflow/core/BUILD
+++ b/tensorflow/core/BUILD
@@ -3316,6 +3316,36 @@ tf_cc_test(
],
)
+filegroup(
+ name = "base_api_def",
+ data = glob(["api_def/base_api/*"]),
+)
+
+tf_cc_test(
+ name = "api_test",
+ srcs = ["api_def/api_test.cc"],
+ data = [
+ ":base_api_def",
+ "//tensorflow/cc:ops/op_gen_overrides.pbtxt",
+ ],
+ tags = [
+ "manual",
+ "notap",
+ ],
+ deps = [
+ ":framework",
+ ":framework_internal",
+ ":lib",
+ ":lib_internal",
+ ":lib_test_internal",
+ ":op_gen_lib",
+ ":op_gen_overrides_proto_cc",
+ ":ops",
+ ":protos_all_cc",
+ ":test",
+ ],
+)
+
tf_cc_test_gpu(
name = "gpu_tracer_test",
size = "small",
diff --git a/tensorflow/core/api_def/api_test.cc b/tensorflow/core/api_def/api_test.cc
new file mode 100644
index 0000000000..ceeb172fa0
--- /dev/null
+++ b/tensorflow/core/api_def/api_test.cc
@@ -0,0 +1,206 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Test that verifies tensorflow/core/api_def/base_api/api_def*.pbtxt files
+// are correct. If the api_def*.pbtxt files do not match the expected contents,
+// run the tensorflow/core/api_def/update_api_def.sh script to update them.
+
+#include <ctype.h>
+#include <algorithm>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "tensorflow/core/framework/api_def.pb.h"
+#include "tensorflow/core/framework/attr_value.pb.h"
+#include "tensorflow/core/framework/op.h"
+#include "tensorflow/core/framework/op_def.pb.h"
+#include "tensorflow/core/framework/op_gen_lib.h"
+#include "tensorflow/core/framework/op_gen_overrides.pb.h"
+#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/lib/io/path.h"
+#include "tensorflow/core/lib/strings/stringprintf.h"
+#include "tensorflow/core/platform/env.h"
+#include "tensorflow/core/platform/init_main.h"
+#include "tensorflow/core/platform/protobuf.h"
+#include "tensorflow/core/platform/test.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+namespace tensorflow {
+namespace {
+constexpr char kDefaultApiDefDir[] =
+ "tensorflow/core/api_def/base_api";
+constexpr char kOverridesFilePath[] =
+ "tensorflow/cc/ops/op_gen_overrides.pbtxt";
+constexpr char kApiDefFileFormat[] = "api_def_%c.pbtxt";
+constexpr char kAlphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
+// Get map from first character to ApiDefs for ops
+// that start with that character.
+std::unordered_map<char, ApiDefs> GenerateApiDef(
+ const OpList& ops, const OpGenOverrides& overrides) {
+ std::unordered_map<string, OpGenOverride> name_to_override;
+ for (const auto& op_override : overrides.op()) {
+ name_to_override[op_override.name()] = op_override;
+ }
+
+ std::unordered_map<char, ApiDefs> api_defs_map;
+
+ for (const auto& op : ops.op()) {
+ CHECK(!op.name().empty())
+ << "Encountered empty op name: %s" << op.DebugString();
+ const char file_id = toupper(op.name()[0]);
+ CHECK(isalpha(file_id)) << "Unexpected op name: " << op.name();
+ ApiDef* api_def = api_defs_map[file_id].add_op();
+ api_def->set_graph_op_name(op.name());
+
+ if (name_to_override.find(op.name()) != name_to_override.end()) {
+ const auto& op_override = name_to_override[op.name()];
+ // Set visibility
+ if (op_override.skip()) {
+ api_def->set_visibility(ApiDef_Visibility_SKIP);
+ } else if (op_override.hide()) {
+ api_def->set_visibility(ApiDef_Visibility_HIDDEN);
+ }
+ // Add endpoints
+ if (!op_override.rename_to().empty()) {
+ auto* endpoint = api_def->add_endpoint();
+ endpoint->set_name(op_override.rename_to());
+ } else {
+ auto* endpoint = api_def->add_endpoint();
+ endpoint->set_name(op.name());
+ }
+ for (auto& alias : op_override.alias()) {
+ auto* endpoint = api_def->add_endpoint();
+ endpoint->set_name(alias);
+ }
+ // Add attributes
+ for (auto& attr : op.attr()) {
+ auto* api_def_attr = api_def->add_attr();
+ api_def_attr->set_name(attr.name());
+ for (auto& attr_override : op_override.attr_default()) {
+ if (attr.name() == attr_override.name()) {
+ *(api_def_attr->mutable_default_value()) = attr_override.value();
+ }
+ }
+ for (auto& attr_rename : op_override.attr_rename()) {
+ if (attr.name() == attr_rename.from()) {
+ api_def_attr->set_rename_to(attr_rename.to());
+ }
+ }
+ }
+ } else {
+ auto* endpoint = api_def->add_endpoint();
+ endpoint->set_name(op.name());
+ }
+ // Add docs
+ api_def->set_summary(op.summary());
+ api_def->set_description(op.description());
+ }
+ return api_defs_map;
+}
+
+// Reads golden api defs file with the given suffix.
+string GetGoldenApiDefsStr(Env* env, const string& api_files_dir, char suffix) {
+ string file_path = strings::Printf(
+ io::JoinPath(api_files_dir, kApiDefFileFormat).c_str(), suffix);
+ if (env->FileExists(file_path).ok()) {
+ string file_contents;
+ TF_EXPECT_OK(ReadFileToString(env, file_path, &file_contents));
+ return file_contents;
+ }
+ return "";
+}
+
+void RunApiTest(bool update_api_def, const string& api_files_dir) {
+ // Read C++ overrides file
+ string overrides_file_contents;
+ Env* env = Env::Default();
+ TF_EXPECT_OK(
+ ReadFileToString(env, kOverridesFilePath, &overrides_file_contents));
+
+ // Read all ops
+ OpList ops;
+ OpRegistry::Global()->Export(false, &ops);
+ const std::vector<string> multi_line_fields = {"description"};
+
+ // Get expected ApiDefs
+ OpGenOverrides overrides;
+ auto new_api_defs_map = GenerateApiDef(ops, overrides);
+
+ bool updated_at_least_one_file = false;
+
+ for (char c : kAlphabet) {
+ string golden_api_defs_str = GetGoldenApiDefsStr(env, api_files_dir, c);
+ string new_api_defs_str = new_api_defs_map[c].DebugString();
+ new_api_defs_str = PBTxtToMultiline(new_api_defs_str, multi_line_fields);
+ if (golden_api_defs_str == new_api_defs_str) {
+ continue;
+ }
+ if (update_api_def) {
+ string output_file_path =
+ io::JoinPath(api_files_dir, strings::Printf(kApiDefFileFormat, c));
+ if (new_api_defs_str.empty()) {
+ std::cout << "Deleting " << output_file_path << "..." << std::endl;
+ TF_EXPECT_OK(env->DeleteFile(output_file_path));
+ } else {
+ std::cout << "Updating " << output_file_path << "..." << std::endl;
+ TF_EXPECT_OK(
+ WriteStringToFile(env, output_file_path, new_api_defs_str));
+ }
+ updated_at_least_one_file = true;
+ } else {
+ EXPECT_EQ(golden_api_defs_str, new_api_defs_str)
+ << "To update golden API files, run "
+ << "tensorflow/core/api_def/update_api_def.sh.";
+ }
+ }
+
+ if (update_api_def && !updated_at_least_one_file) {
+ std::cout << "Api def files are already up to date." << std::endl;
+ }
+}
+
+TEST(ApiTest, GenerateBaseAPIDef) { RunApiTest(false, kDefaultApiDefDir); }
+} // namespace
+} // namespace tensorflow
+
+int main(int argc, char** argv) {
+ bool update_api_def = false;
+ tensorflow::string api_files_dir = tensorflow::kDefaultApiDefDir;
+ std::vector<tensorflow::Flag> flag_list = {
+ tensorflow::Flag(
+ "update_api_def", &update_api_def,
+ "Whether to update tensorflow/core/api_def/base_api/api_def*.pbtxt "
+ "files if they differ from expected API."),
+ tensorflow::Flag("api_def_dir", &api_files_dir,
+ "Base directory of api_def*.pbtxt files.")};
+ std::string usage = tensorflow::Flags::Usage(argv[0], flag_list);
+ bool parsed_values_ok = tensorflow::Flags::Parse(&argc, argv, flag_list);
+ if (!parsed_values_ok) {
+ std::cerr << usage << std::endl;
+ return 2;
+ }
+ if (update_api_def) {
+ tensorflow::port::InitMain(argv[0], &argc, &argv);
+ tensorflow::RunApiTest(update_api_def, api_files_dir);
+ return 0;
+ }
+ testing::InitGoogleTest(&argc, argv);
+ // Run tests
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_A.pbtxt b/tensorflow/core/api_def/base_api/api_def_A.pbtxt
new file mode 100644
index 0000000000..8193d1bc62
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_A.pbtxt
@@ -0,0 +1,670 @@
+op {
+ graph_op_name: "Abort"
+ endpoint {
+ name: "Abort"
+ }
+ summary: "Raise a exception to abort the process when called."
+ description: <<END
+If exit_without_error is true, the process will exit normally,
+otherwise it will exit with a SIGABORT signal.
+
+Returns nothing but an exception.
+END
+}
+op {
+ graph_op_name: "Abs"
+ endpoint {
+ name: "Abs"
+ }
+ summary: "Computes the absolute value of a tensor."
+ description: <<END
+Given a tensor `x`, this operation returns a tensor containing the absolute
+value of each element in `x`. For example, if x is an input element and y is
+an output element, this operation computes \\(y = |x|\\).
+END
+}
+op {
+ graph_op_name: "AccumulatorApplyGradient"
+ endpoint {
+ name: "AccumulatorApplyGradient"
+ }
+ summary: "Applies a gradient to a given accumulator."
+ description: <<END
+Does not add if local_step is less than the accumulator's global_step.
+END
+}
+op {
+ graph_op_name: "AccumulatorNumAccumulated"
+ endpoint {
+ name: "AccumulatorNumAccumulated"
+ }
+ summary: "Returns the number of gradients aggregated in the given accumulators."
+}
+op {
+ graph_op_name: "AccumulatorSetGlobalStep"
+ endpoint {
+ name: "AccumulatorSetGlobalStep"
+ }
+ summary: "Updates the accumulator with a new value for global_step."
+ description: <<END
+Logs warning if the accumulator's value is already higher than
+new_global_step.
+END
+}
+op {
+ graph_op_name: "AccumulatorTakeGradient"
+ endpoint {
+ name: "AccumulatorTakeGradient"
+ }
+ summary: "Extracts the average gradient in the given ConditionalAccumulator."
+ description: <<END
+The op blocks until sufficient (i.e., more than num_required)
+gradients have been accumulated. If the accumulator has already
+aggregated more than num_required gradients, it returns the average of
+the accumulated gradients. Also automatically increments the recorded
+global_step in the accumulator by 1, and resets the aggregate to 0.
+END
+}
+op {
+ graph_op_name: "Acos"
+ endpoint {
+ name: "Acos"
+ }
+ summary: "Computes acos of x element-wise."
+}
+op {
+ graph_op_name: "Acosh"
+ endpoint {
+ name: "Acosh"
+ }
+ summary: "Computes inverse hyperbolic cosine of x element-wise."
+}
+op {
+ graph_op_name: "Add"
+ endpoint {
+ name: "Add"
+ }
+ summary: "Returns x + y element-wise."
+ description: <<END
+*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
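For illustration, a small Python sketch of the broadcasting described above, assuming the op's usual Python endpoint `tf.add` (TF 1.x style):

```python
import tensorflow as tf

# The 1-D tensor [10, 20] is broadcast across each row of x.
x = tf.constant([[1, 2], [3, 4]])
y = tf.constant([10, 20])
z = tf.add(x, y)  # equivalent to x + y

with tf.Session() as sess:
    print(sess.run(z))  # [[11 22]
                        #  [13 24]]
```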
+op {
+ graph_op_name: "AddManySparseToTensorsMap"
+ endpoint {
+ name: "AddManySparseToTensorsMap"
+ }
+ summary: "Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles."
+ description: <<END
+A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
+`sparse_values`, and `sparse_shape`, where
+
+```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
+
+An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
+having a first `sparse_indices` column taking values between `[0, N)`, where
+the minibatch size `N == sparse_shape[0]`.
+
+The input `SparseTensor` must have rank `R` greater than 1, and the first
+dimension is treated as the minibatch dimension. Elements of the `SparseTensor`
+must be sorted in increasing order of this first dimension. The stored
+`SparseTensor` objects pointed to by each row of the output `sparse_handles`
+will have rank `R-1`.
+
+The `SparseTensor` values can then be read out as part of a minibatch by passing
+the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure
+the correct `SparseTensorsMap` is accessed, ensure that the same
+`container` and `shared_name` are passed to that Op. If no `shared_name`
+is provided here, instead use the *name* of the Operation created by calling
+`AddManySparseToTensorsMap` as the `shared_name` passed to
+`TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.
+END
+}
+op {
+ graph_op_name: "AddN"
+ endpoint {
+ name: "AddN"
+ }
+ summary: "Add all input tensors element wise."
+}
+op {
+ graph_op_name: "AddSparseToTensorsMap"
+ endpoint {
+ name: "AddSparseToTensorsMap"
+ }
+ summary: "Add a `SparseTensor` to a `SparseTensorsMap` return its handle."
+ description: <<END
+A `SparseTensor` is represented by three tensors: `sparse_indices`,
+`sparse_values`, and `sparse_shape`.
+
+This operator takes the given `SparseTensor` and adds it to a container
+object (a `SparseTensorsMap`). A unique key within this container is generated
+in the form of an `int64`, and this is the value that is returned.
+
+The `SparseTensor` can then be read out as part of a minibatch by passing
+the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure
+the correct `SparseTensorsMap` is accessed, ensure that the same
+`container` and `shared_name` are passed to that Op. If no `shared_name`
+is provided here, instead use the *name* of the Operation created by calling
+`AddSparseToTensorsMap` as the `shared_name` passed to
+`TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.
+END
+}
+op {
+ graph_op_name: "AdjustContrast"
+ endpoint {
+ name: "AdjustContrast"
+ }
+ summary: "Deprecated. Disallowed in GraphDef version >= 2."
+}
+op {
+ graph_op_name: "AdjustContrastv2"
+ endpoint {
+ name: "AdjustContrastv2"
+ }
+ summary: "Adjust the contrast of one or more images."
+ description: <<END
+`images` is a tensor of at least 3 dimensions. The last 3 dimensions are
+interpreted as `[height, width, channels]`. The other dimensions only
+represent a collection of images, such as `[batch, height, width, channels].`
+
+Contrast is adjusted independently for each channel of each image.
+
+For each channel, the Op first computes the mean of the image pixels in the
+channel and then adjusts each component of each pixel to
+`(x - mean) * contrast_factor + mean`.
+END
+}
+op {
+ graph_op_name: "AdjustHue"
+ endpoint {
+ name: "AdjustHue"
+ }
+ summary: "Adjust the hue of one or more images."
+ description: <<END
+`images` is a tensor of at least 3 dimensions. The last dimension is
+interpreted as channels, and must be three.
+
+The input image is considered in the RGB colorspace. Conceptually, the RGB
+colors are first mapped into HSV. A delta is then applied to all the hue values,
+and the result is mapped back to the RGB colorspace.
+END
+}
+op {
+ graph_op_name: "AdjustSaturation"
+ endpoint {
+ name: "AdjustSaturation"
+ }
+ summary: "Adjust the saturation of one or more images."
+ description: <<END
+`images` is a tensor of at least 3 dimensions. The last dimension is
+interpreted as channels, and must be three.
+
+The input image is considered in the RGB colorspace. Conceptually, the RGB
+colors are first mapped into HSV. A scale is then applied to all the saturation
+values, and the result is mapped back to the RGB colorspace.
+END
+}
+op {
+ graph_op_name: "All"
+ endpoint {
+ name: "All"
+ }
+ summary: "Computes the \"logical and\" of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
+END
+}
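A short sketch of the reduction semantics described above, assuming this op surfaces in Python as `tf.reduce_all`:

```python
import tensorflow as tf

x = tf.constant([[True, True], [False, True]])

row_all = tf.reduce_all(x, axis=1)                       # rank reduced by one
row_all_kept = tf.reduce_all(x, axis=1, keep_dims=True)  # reduced dim kept with length 1

with tf.Session() as sess:
    print(sess.run(row_all))       # [ True False]
    print(sess.run(row_all_kept))  # [[ True]
                                   #  [False]]
```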
+op {
+ graph_op_name: "AllCandidateSampler"
+ endpoint {
+ name: "AllCandidateSampler"
+ }
+ summary: "Generates labels for candidate sampling with a learned unigram distribution."
+ description: <<END
+See explanations of candidate sampling and the data formats at
+go/candidate-sampling.
+
+For each batch, this op picks a single set of sampled candidate labels.
+
+The advantages of sampling candidates per-batch are simplicity and the
+possibility of efficient dense matrix multiplication. The disadvantage is that
+the sampled candidates must be chosen independently of the context and of the
+true labels.
+END
+}
+op {
+ graph_op_name: "Angle"
+ endpoint {
+ name: "Angle"
+ }
+ summary: "Returns the argument of a complex number."
+ description: <<END
+Given a tensor `input` of complex numbers, this operation returns a tensor of
+type `float` that is the argument of each element in `input`. All elements in
+`input` must be complex numbers of the form \\(a + bj\\), where *a*
+is the real part and *b* is the imaginary part.
+
+The argument returned by this operation is of the form \\(atan2(b, a)\\).
+
+For example:
+
+```
+# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+tf.angle(input) ==> [2.0132, 1.056]
+```
+
+@compatibility(numpy)
+Equivalent to np.angle.
+@end_compatibility
+END
+}
+op {
+ graph_op_name: "Any"
+ endpoint {
+ name: "Any"
+ }
+ summary: "Computes the \"logical or\" of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
+END
+}
+op {
+ graph_op_name: "ApplyAdadelta"
+ endpoint {
+ name: "ApplyAdadelta"
+ }
+ summary: "Update \'*var\' according to the adadelta scheme."
+ description: <<END
+accum = rho() * accum + (1 - rho()) * grad.square();
+update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
+update_accum = rho() * update_accum + (1 - rho()) * update.square();
+var -= update;
+END
+}
+op {
+ graph_op_name: "ApplyAdagrad"
+ endpoint {
+ name: "ApplyAdagrad"
+ }
+ summary: "Update \'*var\' according to the adagrad scheme."
+ description: <<END
+accum += grad * grad
+var -= lr * grad * (1 / sqrt(accum))
+END
+}
+op {
+ graph_op_name: "ApplyAdagradDA"
+ endpoint {
+ name: "ApplyAdagradDA"
+ }
+ summary: "Update \'*var\' according to the proximal adagrad scheme."
+}
+op {
+ graph_op_name: "ApplyAdam"
+ endpoint {
+ name: "ApplyAdam"
+ }
+ summary: "Update \'*var\' according to the Adam algorithm."
+ description: <<END
+lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
+m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
+v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
+variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
+END
+}
+op {
+ graph_op_name: "ApplyCenteredRMSProp"
+ endpoint {
+ name: "ApplyCenteredRMSProp"
+ }
+ summary: "Update \'*var\' according to the centered RMSProp algorithm."
+ description: <<END
+The centered RMSProp algorithm uses an estimate of the centered second moment
+(i.e., the variance) for normalization, as opposed to regular RMSProp, which
+uses the (uncentered) second moment. This often helps with training, but is
+slightly more expensive in terms of computation and memory.
+
+Note that in dense implementation of this algorithm, mg, ms, and mom will
+update even if the grad is zero, but in this sparse implementation, mg, ms,
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+mean_grad = decay * mean_grad + (1-decay) * gradient
+
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
+
+mg <- rho * mg_{t-1} + (1-rho) * grad
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
+var <- var - mom
+END
+}
+op {
+ graph_op_name: "ApplyFtrl"
+ endpoint {
+ name: "ApplyFtrl"
+ }
+ summary: "Update \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+accum_new = accum + grad * grad
+linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
+op {
+ graph_op_name: "ApplyFtrlV2"
+ endpoint {
+ name: "ApplyFtrlV2"
+ }
+ summary: "Update \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+grad_with_shrinkage = grad + 2 * l2_shrinkage * var
+accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
+linear += grad_with_shrinkage +
+ (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
+op {
+ graph_op_name: "ApplyGradientDescent"
+ endpoint {
+ name: "ApplyGradientDescent"
+ }
+ summary: "Update \'*var\' by subtracting \'alpha\' * \'delta\' from it."
+}
+op {
+ graph_op_name: "ApplyMomentum"
+ endpoint {
+ name: "ApplyMomentum"
+ }
+ summary: "Update \'*var\' according to the momentum scheme. Set use_nesterov = True if you"
+ description: <<END
+want to use Nesterov momentum.
+
+accum = accum * momentum + grad
+var -= lr * accum
+END
+}
+op {
+ graph_op_name: "ApplyProximalAdagrad"
+ endpoint {
+ name: "ApplyProximalAdagrad"
+ }
+ summary: "Update \'*var\' and \'*accum\' according to FOBOS with Adagrad learning rate."
+ description: <<END
+accum += grad * grad
+prox_v = var - lr * grad * (1 / sqrt(accum))
+var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
+END
+}
+op {
+ graph_op_name: "ApplyProximalGradientDescent"
+ endpoint {
+ name: "ApplyProximalGradientDescent"
+ }
+ summary: "Update \'*var\' as FOBOS algorithm with fixed learning rate."
+ description: <<END
+prox_v = var - alpha * delta
+var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
+END
+}
+op {
+ graph_op_name: "ApplyRMSProp"
+ endpoint {
+ name: "ApplyRMSProp"
+ }
+ summary: "Update \'*var\' according to the RMSProp algorithm."
+ description: <<END
+Note that in dense implementation of this algorithm, ms and mom will
+update even if the grad is zero, but in this sparse implementation, ms
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
+
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+var <- var - mom
+END
+}
+op {
+ graph_op_name: "ApproximateEqual"
+ endpoint {
+ name: "ApproximateEqual"
+ }
+ summary: "Returns the truth value of abs(x-y) < tolerance element-wise."
+}
+op {
+ graph_op_name: "ArgMax"
+ endpoint {
+ name: "ArgMax"
+ }
+ summary: "Returns the index with the largest value across dimensions of a tensor."
+ description: <<END
+Note that in case of ties the identity of the return value is not guaranteed.
+END
+}
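For example, a sketch using the usual Python endpoint `tf.argmax`:

```python
import tensorflow as tf

x = tf.constant([[1, 10, 5],
                 [3, 2, 9]])

col_max = tf.argmax(x, axis=0)  # index of the largest value in each column
row_max = tf.argmax(x, axis=1)  # index of the largest value in each row

with tf.Session() as sess:
    print(sess.run(col_max))  # [1 0 1]
    print(sess.run(row_max))  # [1 2]
```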
+op {
+ graph_op_name: "ArgMin"
+ endpoint {
+ name: "ArgMin"
+ }
+ summary: "Returns the index with the smallest value across dimensions of a tensor."
+ description: <<END
+Note that in case of ties the identity of the return value is not guaranteed.
+END
+}
+op {
+ graph_op_name: "AsString"
+ endpoint {
+ name: "AsString"
+ }
+ summary: "Converts each entry in the given tensor to strings. Supports many numeric"
+ description: <<END
+types and boolean.
+END
+}
+op {
+ graph_op_name: "Asin"
+ endpoint {
+ name: "Asin"
+ }
+ summary: "Computes asin of x element-wise."
+}
+op {
+ graph_op_name: "Asinh"
+ endpoint {
+ name: "Asinh"
+ }
+ summary: "Computes inverse hyperbolic sine of x element-wise."
+}
+op {
+ graph_op_name: "Assert"
+ endpoint {
+ name: "Assert"
+ }
+ summary: "Asserts that the given condition is true."
+ description: <<END
+If `condition` evaluates to false, print the list of tensors in `data`.
+`summarize` determines how many entries of the tensors to print.
+END
+}
+op {
+ graph_op_name: "Assign"
+ endpoint {
+ name: "Assign"
+ }
+ summary: "Update \'ref\' by assigning \'value\' to it."
+ description: <<END
+This operation outputs "ref" after the assignment is done.
+This makes it easier to chain operations that need to use the reset value.
+END
+}
+op {
+ graph_op_name: "AssignAdd"
+ endpoint {
+ name: "AssignAdd"
+ }
+ summary: "Update \'ref\' by adding \'value\' to it."
+ description: <<END
+This operation outputs "ref" after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+END
+}
+op {
+ graph_op_name: "AssignSub"
+ endpoint {
+ name: "AssignSub"
+ }
+ summary: "Update \'ref\' by subtracting \'value\' from it."
+ description: <<END
+This operation outputs "ref" after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+END
+}
+op {
+ graph_op_name: "Atan"
+ endpoint {
+ name: "Atan"
+ }
+ summary: "Computes atan of x element-wise."
+}
+op {
+ graph_op_name: "Atan2"
+ endpoint {
+ name: "Atan2"
+ }
+ summary: "Computes arctangent of `y/x` element-wise, respecting signs of the arguments."
+ description: <<END
+This is the angle \( \theta \in [-\pi, \pi] \) such that
+\[ x = r \cos(\theta) \]
+and
+\[ y = r \sin(\theta) \]
+where \(r = \sqrt{x^2 + y^2} \).
+END
+}
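The quadrant handling is what distinguishes this op from a plain `atan(y/x)`; a sketch assuming the Python endpoint `tf.atan2`:

```python
import numpy as np
import tensorflow as tf

y = tf.constant([1.0,  1.0, -1.0])
x = tf.constant([1.0, -1.0, -1.0])

theta = tf.atan2(y, x)   # respects the signs of both arguments
naive = tf.atan(y / x)   # loses the quadrant information

with tf.Session() as sess:
    print(sess.run(theta) / np.pi)  # [ 0.25  0.75 -0.75]
    print(sess.run(naive) / np.pi)  # [ 0.25 -0.25  0.25]
```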
+op {
+ graph_op_name: "Atanh"
+ endpoint {
+ name: "Atanh"
+ }
+ summary: "Computes inverse hyperbolic tangent of x element-wise."
+}
+op {
+ graph_op_name: "AudioSpectrogram"
+ endpoint {
+ name: "AudioSpectrogram"
+ }
+ summary: "Produces a visualization of audio data over time."
+ description: <<END
+Spectrograms are a standard way of representing audio information as a series of
+slices of frequency information, one slice for each window of time. By joining
+these together into a sequence, they form a distinctive fingerprint of the sound
+over time.
+
+This op expects to receive audio data as an input, stored as floats in the range
+-1 to 1, together with a window width in samples, and a stride specifying how
+far to move the window between slices. From this it generates a three
+dimensional output. The lowest dimension has an amplitude value for each
+frequency during that time slice. The next dimension is time, with successive
+frequency slices. The final dimension is for the channels in the input, so a
+stereo audio input would have two here for example.
+
+This means the layout when converted and saved as an image is rotated 90 degrees
+clockwise from a typical spectrogram. Time is descending down the Y axis, and
+the frequency decreases from left to right.
+
+Each value in the result represents the square root of the sum of the real and
+imaginary parts of an FFT on the current window of samples. In this way, the
+lowest dimension represents the power of each frequency in the current window,
+and adjacent windows are concatenated in the next dimension.
+
+To get a more intuitive and visual look at what this operation does, you can run
+tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
+resulting spectrogram as a PNG image.
+END
+}
+op {
+ graph_op_name: "AudioSummary"
+ endpoint {
+ name: "AudioSummary"
+ }
+ summary: "Outputs a `Summary` protocol buffer with audio."
+ description: <<END
+The summary has up to `max_outputs` summary values containing audio. The
+audio is built from `tensor` which must be 3-D with shape `[batch_size,
+frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
+assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
+
+The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+build the `tag` of the summary values:
+
+* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
+* If `max_outputs` is greater than 1, the summary value tags are
+ generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+END
+}
+op {
+ graph_op_name: "AudioSummaryV2"
+ endpoint {
+ name: "AudioSummaryV2"
+ }
+ summary: "Outputs a `Summary` protocol buffer with audio."
+ description: <<END
+The summary has up to `max_outputs` summary values containing audio. The
+audio is built from `tensor` which must be 3-D with shape `[batch_size,
+frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
+assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
+
+The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+build the `tag` of the summary values:
+
+* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
+* If `max_outputs` is greater than 1, the summary value tags are
+ generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+END
+}
+op {
+ graph_op_name: "AvgPool"
+ endpoint {
+ name: "AvgPool"
+ }
+ summary: "Performs average pooling on the input."
+ description: <<END
+Each entry in `output` is the mean of the corresponding size `ksize`
+window in `value`.
+END
+}
+op {
+ graph_op_name: "AvgPool3D"
+ endpoint {
+ name: "AvgPool3D"
+ }
+ summary: "Performs 3D average pooling on the input."
+}
+op {
+ graph_op_name: "AvgPool3DGrad"
+ endpoint {
+ name: "AvgPool3DGrad"
+ }
+ summary: "Computes gradients of average pooling function."
+}
+op {
+ graph_op_name: "AvgPoolGrad"
+ endpoint {
+ name: "AvgPoolGrad"
+ }
+ summary: "Computes gradients of the average pooling function."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_B.pbtxt b/tensorflow/core/api_def/base_api/api_def_B.pbtxt
new file mode 100644
index 0000000000..716d397f9a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_B.pbtxt
@@ -0,0 +1,448 @@
+op {
+ graph_op_name: "Barrier"
+ endpoint {
+ name: "Barrier"
+ }
+ summary: "Defines a barrier that persists across different graph executions."
+ description: <<END
+A barrier represents a key-value map, where each key is a string, and
+each value is a tuple of tensors.
+
+At runtime, the barrier contains 'complete' and 'incomplete'
+elements. A complete element has defined tensors for all components of
+its value tuple, and may be accessed using BarrierTakeMany. An
+incomplete element has some undefined components in its value tuple,
+and may be updated using BarrierInsertMany.
+END
+}
+op {
+ graph_op_name: "BarrierClose"
+ endpoint {
+ name: "BarrierClose"
+ }
+ summary: "Closes the given barrier."
+ description: <<END
+This operation signals that no more new elements will be inserted in the
+given barrier. Subsequent InsertMany that try to introduce a new key will fail.
+Subsequent InsertMany operations that just add missing components to already
+existing elements will continue to succeed. Subsequent TakeMany operations will
+continue to succeed if sufficient completed elements remain in the barrier.
+Subsequent TakeMany operations that would block will fail immediately.
+END
+}
+op {
+ graph_op_name: "BarrierIncompleteSize"
+ endpoint {
+ name: "BarrierIncompleteSize"
+ }
+ summary: "Computes the number of incomplete elements in the given barrier."
+}
+op {
+ graph_op_name: "BarrierInsertMany"
+ endpoint {
+ name: "BarrierInsertMany"
+ }
+ summary: "For each key, assigns the respective value to the specified component."
+ description: <<END
+If a key is not found in the barrier, this operation will create a new
+incomplete element. If a key is found in the barrier, and the element
+already has a value at component_index, this operation will fail with
+INVALID_ARGUMENT, and leave the barrier in an undefined state.
+END
+}
+op {
+ graph_op_name: "BarrierReadySize"
+ endpoint {
+ name: "BarrierReadySize"
+ }
+ summary: "Computes the number of complete elements in the given barrier."
+}
+op {
+ graph_op_name: "BarrierTakeMany"
+ endpoint {
+ name: "BarrierTakeMany"
+ }
+ summary: "Takes the given number of completed elements from a barrier."
+ description: <<END
+This operation concatenates completed-element component tensors along
+the 0th dimension to make a single component tensor.
+
+Elements come out of the barrier when they are complete, and in the order
+in which they were placed into the barrier. The indices output provides
+information about the batch in which each element was originally inserted
+into the barrier.
+END
+}
+op {
+ graph_op_name: "BatchCholesky"
+ endpoint {
+ name: "BatchCholesky"
+ }
+}
+op {
+ graph_op_name: "BatchCholeskyGrad"
+ endpoint {
+ name: "BatchCholeskyGrad"
+ }
+}
+op {
+ graph_op_name: "BatchDataset"
+ endpoint {
+ name: "BatchDataset"
+ }
+ summary: "Creates a dataset that batches `batch_size` elements from `input_dataset`."
+}
+op {
+ graph_op_name: "BatchFFT"
+ endpoint {
+ name: "BatchFFT"
+ }
+}
+op {
+ graph_op_name: "BatchFFT2D"
+ endpoint {
+ name: "BatchFFT2D"
+ }
+}
+op {
+ graph_op_name: "BatchFFT3D"
+ endpoint {
+ name: "BatchFFT3D"
+ }
+}
+op {
+ graph_op_name: "BatchIFFT"
+ endpoint {
+ name: "BatchIFFT"
+ }
+}
+op {
+ graph_op_name: "BatchIFFT2D"
+ endpoint {
+ name: "BatchIFFT2D"
+ }
+}
+op {
+ graph_op_name: "BatchIFFT3D"
+ endpoint {
+ name: "BatchIFFT3D"
+ }
+}
+op {
+ graph_op_name: "BatchMatMul"
+ endpoint {
+ name: "BatchMatMul"
+ }
+ summary: "Multiplies slices of two tensors in batches."
+ description: <<END
+Multiplies all slices of `Tensor` `x` and `y` (each slice can be
+viewed as an element of a batch), and arranges the individual results
+in a single output tensor of the same batch size. Each of the
+individual slices can optionally be adjointed (to adjoint a matrix
+means to transpose and conjugate it) before multiplication by setting
+the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
+
+The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
+and `[..., r_y, c_y]`.
+
+The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
+
+ r_o = c_x if adj_x else r_x
+ c_o = r_y if adj_y else c_y
+
+It is computed as:
+
+ output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
+END
+}
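Concretely, with the leading dimension treated as the batch (a sketch; in Python this op is normally reached through `tf.matmul` on rank-3 tensors):

```python
import numpy as np
import tensorflow as tf

# A batch of two 3x4 matrices times a batch of two 4x5 matrices.
x = tf.constant(np.random.rand(2, 3, 4), dtype=tf.float32)
y = tf.constant(np.random.rand(2, 4, 5), dtype=tf.float32)

z = tf.matmul(x, y)  # each slice is multiplied independently
print(z.shape)       # (2, 3, 5)

# adjoint_b corresponds to the op's adj_y flag.
y2 = tf.constant(np.random.rand(2, 5, 4), dtype=tf.float32)
w = tf.matmul(x, y2, adjoint_b=True)  # slices: (3x4) . (5x4)^H -> (3x5)
print(w.shape)                        # (2, 3, 5)
```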
+op {
+ graph_op_name: "BatchMatrixBandPart"
+ endpoint {
+ name: "BatchMatrixBandPart"
+ }
+}
+op {
+ graph_op_name: "BatchMatrixDeterminant"
+ endpoint {
+ name: "BatchMatrixDeterminant"
+ }
+}
+op {
+ graph_op_name: "BatchMatrixDiag"
+ endpoint {
+ name: "BatchMatrixDiag"
+ }
+}
+op {
+ graph_op_name: "BatchMatrixDiagPart"
+ endpoint {
+ name: "BatchMatrixDiagPart"
+ }
+}
+op {
+ graph_op_name: "BatchMatrixInverse"
+ endpoint {
+ name: "BatchMatrixInverse"
+ }
+}
+op {
+ graph_op_name: "BatchMatrixSetDiag"
+ endpoint {
+ name: "BatchMatrixSetDiag"
+ }
+}
+op {
+ graph_op_name: "BatchMatrixSolve"
+ endpoint {
+ name: "BatchMatrixSolve"
+ }
+}
+op {
+ graph_op_name: "BatchMatrixSolveLs"
+ endpoint {
+ name: "BatchMatrixSolveLs"
+ }
+}
+op {
+ graph_op_name: "BatchMatrixTriangularSolve"
+ endpoint {
+ name: "BatchMatrixTriangularSolve"
+ }
+}
+op {
+ graph_op_name: "BatchNormWithGlobalNormalization"
+ endpoint {
+ name: "BatchNormWithGlobalNormalization"
+ }
+ summary: "Batch normalization."
+ description: <<END
+This op is deprecated. Prefer `tf.nn.batch_normalization`.
+END
+}
+op {
+ graph_op_name: "BatchNormWithGlobalNormalizationGrad"
+ endpoint {
+ name: "BatchNormWithGlobalNormalizationGrad"
+ }
+ summary: "Gradients for batch normalization."
+ description: <<END
+This op is deprecated. See `tf.nn.batch_normalization`.
+END
+}
+op {
+ graph_op_name: "BatchSelfAdjointEig"
+ endpoint {
+ name: "BatchSelfAdjointEig"
+ }
+}
+op {
+ graph_op_name: "BatchSelfAdjointEigV2"
+ endpoint {
+ name: "BatchSelfAdjointEigV2"
+ }
+}
+op {
+ graph_op_name: "BatchSvd"
+ endpoint {
+ name: "BatchSvd"
+ }
+}
+op {
+ graph_op_name: "BatchToSpace"
+ endpoint {
+ name: "BatchToSpace"
+ }
+ summary: "BatchToSpace for 4-D tensors of type T."
+ description: <<END
+This is a legacy version of the more general BatchToSpaceND.
+
+Rearranges (permutes) data from batch into blocks of spatial data, followed by
+cropping. This is the reverse transformation of SpaceToBatch. More specifically,
+this op outputs a copy of the input tensor where values from the `batch`
+dimension are moved in spatial blocks to the `height` and `width` dimensions,
+followed by cropping along the `height` and `width` dimensions.
+END
+}
+op {
+ graph_op_name: "BatchToSpaceND"
+ endpoint {
+ name: "BatchToSpaceND"
+ }
+ summary: "BatchToSpace for N-D tensors of type T."
+ description: <<END
+This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
+`block_shape + [batch]`, interleaves these blocks back into the grid defined by
+the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
+the input. The spatial dimensions of this intermediate result are then
+optionally cropped according to `crops` to produce the output. This is the
+reverse of SpaceToBatch. See below for a precise description.
+END
+}
+op {
+ graph_op_name: "Betainc"
+ endpoint {
+ name: "Betainc"
+ }
+ summary: "Compute the regularized incomplete beta integral \\\\(I_x(a, b)\\\\)."
+ description: <<END
+The regularized incomplete beta integral is defined as:
+
+
+\\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
+
+where
+
+
+\\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
+
+
+is the incomplete beta function and \\(B(a, b)\\) is the *complete*
+beta function.
+END
+}
+op {
+ graph_op_name: "BiasAdd"
+ endpoint {
+ name: "BiasAdd"
+ }
+ summary: "Adds `bias` to `value`."
+ description: <<END
+This is a special case of `tf.add` where `bias` is restricted to be 1-D.
+Broadcasting is supported, so `value` may have any number of dimensions.
+END
+}
+op {
+ graph_op_name: "BiasAddGrad"
+ endpoint {
+ name: "BiasAddGrad"
+ }
+ summary: "The backward operation for \"BiasAdd\" on the \"bias\" tensor."
+ description: <<END
+It accumulates all the values from out_backprop into the feature dimension.
+For NHWC data format, the feature dimension is the last. For NCHW data format,
+the feature dimension is the third-to-last.
+END
+}
+op {
+ graph_op_name: "BiasAddV1"
+ endpoint {
+ name: "BiasAddV1"
+ }
+ summary: "Adds `bias` to `value`."
+ description: <<END
+This is a deprecated version of BiasAdd and will be soon removed.
+
+This is a special case of `tf.add` where `bias` is restricted to be 1-D.
+Broadcasting is supported, so `value` may have any number of dimensions.
+END
+}
+op {
+ graph_op_name: "Bincount"
+ endpoint {
+ name: "Bincount"
+ }
+ summary: "Counts the number of occurrences of each value in an integer array."
+ description: <<END
+Outputs a vector with length `size` and the same dtype as `weights`. If
+`weights` are empty, then index `i` stores the number of times the value `i` is
+counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
+the value in `weights` at each index where the corresponding value in `arr` is
+`i`.
+
+Values in `arr` outside of the range [0, size) are ignored.
+END
+}
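For example, a sketch assuming the Python endpoint `tf.bincount`:

```python
import tensorflow as tf

arr = tf.constant([1, 1, 2, 3, 3, 3])

counts = tf.bincount(arr)  # plain occurrence counts
weighted = tf.bincount(arr, weights=tf.constant([1., 1., 2., .5, .5, .5]))

with tf.Session() as sess:
    print(sess.run(counts))    # [0 2 1 3]
    print(sess.run(weighted))  # [0.  2.  2.  1.5]
```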
+op {
+ graph_op_name: "Bitcast"
+ endpoint {
+ name: "Bitcast"
+ }
+ summary: "Bitcasts a tensor from one type to another without copying data."
+ description: <<END
+Given a tensor `input`, this operation returns a tensor that has the same buffer
+data as `input` with datatype `type`.
+
+If the input datatype `T` is larger than the output datatype `type` then the
+shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
+
+If `T` is smaller than `type`, the operator requires that the rightmost
+dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
+[..., sizeof(`type`)/sizeof(`T`)] to [...].
+
+*NOTE*: Bitcast is implemented as a low-level cast, so machines with different
+endian orderings will give different results.
+END
+}
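A quick check of the shape rules in both directions (sketch, assuming the Python endpoint `tf.bitcast`):

```python
import tensorflow as tf

x = tf.constant([1.0, 2.0], dtype=tf.float32)  # shape [2], 4 bytes per element

small = tf.bitcast(x, tf.uint8)       # larger -> smaller dtype: shape becomes [2, 4]
back = tf.bitcast(small, tf.float32)  # smaller -> larger: trailing 4 folds away, shape [2]

print(small.shape)  # (2, 4)
print(back.shape)   # (2,)
```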
+op {
+ graph_op_name: "BitwiseAnd"
+ endpoint {
+ name: "BitwiseAnd"
+ }
+ summary: "Elementwise computes the bitwise AND of `x` and `y`."
+ description: <<END
+The result will have those bits set that are set in both `x` and `y`. The
+computation is performed on the underlying representations of `x` and `y`.
+END
+}
+op {
+ graph_op_name: "BitwiseOr"
+ endpoint {
+ name: "BitwiseOr"
+ }
+ summary: "Elementwise computes the bitwise OR of `x` and `y`."
+ description: <<END
+The result will have those bits set that are set in `x`, `y`, or both. The
+computation is performed on the underlying representations of `x` and `y`.
+END
+}
+op {
+ graph_op_name: "BitwiseXor"
+ endpoint {
+ name: "BitwiseXor"
+ }
+ summary: "Elementwise computes the bitwise XOR of `x` and `y`."
+ description: <<END
+The result will have those bits set that are different in `x` and `y`. The
+computation is performed on the underlying representations of `x` and `y`.
+END
+}
+op {
+ graph_op_name: "BroadcastArgs"
+ endpoint {
+ name: "BroadcastArgs"
+ }
+ summary: "Return the shape of s0 op s1 with broadcast."
+ description: <<END
+Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
+broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
+END
+}
+op {
+ graph_op_name: "BroadcastGradientArgs"
+ endpoint {
+ name: "BroadcastGradientArgs"
+ }
+ summary: "Return the reduction indices for computing gradients of s0 op s1 with broadcast."
+ description: <<END
+This is typically used by gradient computations for a broadcasting operation.
+END
+}
+op {
+ graph_op_name: "Bucketize"
+ endpoint {
+ name: "Bucketize"
+ }
+ summary: "Bucketizes \'input\' based on \'boundaries\'."
+ description: <<END
+For example, if the inputs are
+ boundaries = [0, 10, 100]
+ input = [[-5, 10000]
+ [150, 10]
+ [5, 100]]
+
+then the output will be
+ output = [[0, 3]
+ [3, 2]
+ [1, 3]]
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_C.pbtxt b/tensorflow/core/api_def/base_api/api_def_C.pbtxt
new file mode 100644
index 0000000000..48b04b7971
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_C.pbtxt
@@ -0,0 +1,513 @@
+op {
+ graph_op_name: "CTCBeamSearchDecoder"
+ endpoint {
+ name: "CTCBeamSearchDecoder"
+ }
+ summary: "Performs beam search decoding on the logits given in input."
+ description: <<END
+A note about the attribute merge_repeated: For the beam search decoder,
+this means that if consecutive entries in a beam are the same, only
+the first of these is emitted. That is, when the top path is "A B B B B",
+"A B" is returned if merge_repeated = True but "A B B B B" is
+returned if merge_repeated = False.
+END
+}
+op {
+ graph_op_name: "CTCGreedyDecoder"
+ endpoint {
+ name: "CTCGreedyDecoder"
+ }
+ summary: "Performs greedy decoding on the logits given in inputs."
+ description: <<END
+A note about the attribute merge_repeated: if enabled, when
+consecutive logits' maximum indices are the same, only the first of
+these is emitted. Labeling the blank '*', the sequence "A B B * B B"
+becomes "A B B" if merge_repeated = True and "A B B B B" if
+merge_repeated = False.
+
+Regardless of the value of merge_repeated, if the maximum index of a given
+time and batch corresponds to the blank, index `(num_classes - 1)`, no new
+element is emitted.
+END
+}
+op {
+ graph_op_name: "CTCLoss"
+ endpoint {
+ name: "CTCLoss"
+ }
+ summary: "Calculates the CTC Loss (log probability) for each batch entry. Also calculates"
+ description: <<END
+the gradient. This class performs the softmax operation for you, so inputs
+should be e.g. linear projections of outputs by an LSTM.
+END
+}
+op {
+ graph_op_name: "CacheDataset"
+ endpoint {
+ name: "CacheDataset"
+ }
+ summary: "Creates a dataset that caches elements from `input_dataset`."
+ description: <<END
+A CacheDataset will iterate over the input_dataset, and store tensors. If the
+cache already exists, the cache will be used. If the cache is inappropriate
+(e.g. cannot be opened, contains tensors of the wrong shape / size), an error
+will be returned when used.
+END
+}
+op {
+ graph_op_name: "Cast"
+ endpoint {
+ name: "Cast"
+ }
+ summary: "Cast x of type SrcT to y of DstT."
+}
+op {
+ graph_op_name: "Ceil"
+ endpoint {
+ name: "Ceil"
+ }
+ summary: "Returns element-wise smallest integer in not less than x."
+}
+op {
+ graph_op_name: "CheckNumerics"
+ endpoint {
+ name: "CheckNumerics"
+ }
+ summary: "Checks a tensor for NaN and Inf values."
+ description: <<END
+When run, reports an `InvalidArgument` error if `tensor` has any values
+that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
+END
+}
+op {
+ graph_op_name: "Cholesky"
+ endpoint {
+ name: "Cholesky"
+ }
+ summary: "Computes the Cholesky decomposition of one or more square matrices."
+ description: <<END
+The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+form square matrices.
+
+The input has to be symmetric and positive definite. Only the lower-triangular
+part of the input will be used for this operation. The upper-triangular part
+will not be read.
+
+The output is a tensor of the same shape as the input
+containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
+
+**Note**: The gradient computation on GPU is faster for large matrices but
+not for large batch dimensions when the submatrices are small. In this
+case it might be faster to use the CPU.
+END
+}
+op {
+ graph_op_name: "CholeskyGrad"
+ endpoint {
+ name: "CholeskyGrad"
+ }
+ summary: "Computes the reverse mode backpropagated gradient of the Cholesky algorithm."
+ description: <<END
+For an explanation see "Differentiation of the Cholesky algorithm" by
+Iain Murray http://arxiv.org/abs/1602.07527.
+END
+}
+op {
+ graph_op_name: "CompareAndBitpack"
+ endpoint {
+ name: "CompareAndBitpack"
+ }
+ summary: "Compare values of `input` to `threshold` and pack resulting bits into a `uint8`."
+ description: <<END
+Each comparison returns a boolean `true` (if `input_value > threshold`)
+and `false` otherwise.
+
+This operation is useful for Locality-Sensitive-Hashing (LSH) and other
+algorithms that use hashing approximations of cosine and `L2` distances;
+codes can be generated from an input via:
+
+```python
+codebook_size = 50
+codebook_bits = codebook_size * 32
+codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
+ dtype=x.dtype,
+ initializer=tf.orthogonal_initializer())
+codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
+codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32
+# now codes has shape x.shape[:-1] + [codebook_size]
+```
+
+**NOTE**: Currently, the innermost dimension of the tensor must be divisible
+by 8.
+
+Given an `input` shaped `[s0, s1, ..., s_n]`, the output is
+a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.
+END
+}
+op {
+ graph_op_name: "Complex"
+ endpoint {
+ name: "Complex"
+ }
+ summary: "Converts two real numbers to a complex number."
+ description: <<END
+Given a tensor `real` representing the real part of a complex number, and a
+tensor `imag` representing the imaginary part of a complex number, this
+operation returns complex numbers elementwise of the form \\(a + bj\\), where
+*a* represents the `real` part and *b* represents the `imag` part.
+
+The input tensors `real` and `imag` must have the same shape.
+
+For example:
+
+```
+# tensor 'real' is [2.25, 3.25]
+# tensor `imag` is [4.75, 5.75]
+tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
+```
+END
+}
+op {
+ graph_op_name: "ComplexAbs"
+ endpoint {
+ name: "ComplexAbs"
+ }
+ summary: "Computes the complex absolute value of a tensor."
+ description: <<END
+Given a tensor `x` of complex numbers, this operation returns a tensor of type
+`float` or `double` that is the absolute value of each element in `x`. All
+elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
+value is computed as \\( \sqrt{a^2 + b^2}\\).
+END
+}
+op {
+ graph_op_name: "ComputeAccidentalHits"
+ endpoint {
+ name: "ComputeAccidentalHits"
+ }
+ summary: "Computes the ids of the positions in sampled_candidates that match true_labels."
+ description: <<END
+When doing log-odds NCE, the result of this op should be passed through a
+SparseToDense op, then added to the logits of the sampled candidates. This has
+the effect of 'removing' the sampled labels that match the true labels by
+making the classifier sure that they are sampled labels.
+END
+}
+op {
+ graph_op_name: "Concat"
+ endpoint {
+ name: "Concat"
+ }
+ summary: "Concatenates tensors along one dimension."
+}
+op {
+ graph_op_name: "ConcatOffset"
+ endpoint {
+ name: "ConcatOffset"
+ }
+ summary: "Computes offsets of concat inputs within its output."
+ description: <<END
+For example:
+
+```
+# 'x' is [2, 2, 7]
+# 'y' is [2, 3, 7]
+# 'z' is [2, 5, 7]
+concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
+```
+
+This is typically used by gradient computations for a concat operation.
+END
+}
+op {
+ graph_op_name: "ConcatV2"
+ endpoint {
+ name: "ConcatV2"
+ }
+ summary: "Concatenates tensors along one dimension."
+}
+op {
+ graph_op_name: "ConcatenateDataset"
+ endpoint {
+ name: "ConcatenateDataset"
+ }
+ summary: "Creates a dataset that concatenates `input_dataset` with `another_dataset`."
+}
+op {
+ graph_op_name: "ConditionalAccumulator"
+ endpoint {
+ name: "ConditionalAccumulator"
+ }
+ summary: "A conditional accumulator for aggregating gradients."
+ description: <<END
+The accumulator accepts gradients marked with local_step greater or
+equal to the most recent global_step known to the accumulator. The
+average can be extracted from the accumulator, provided sufficient
+gradients have been accumulated. Extracting the average automatically
+resets the aggregate to 0, and increments the global_step recorded by
+the accumulator.
+END
+}
+op {
+ graph_op_name: "Conj"
+ endpoint {
+ name: "Conj"
+ }
+ summary: "Returns the complex conjugate of a complex number."
+ description: <<END
+Given a tensor `input` of complex numbers, this operation returns a tensor of
+complex numbers that are the complex conjugate of each element in `input`. The
+complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
+real part and *b* is the imaginary part.
+
+The complex conjugate returned by this operation is of the form \\(a - bj\\).
+
+For example:
+
+```
+# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
+```
+END
+}
+op {
+ graph_op_name: "Const"
+ endpoint {
+ name: "Const"
+ }
+ summary: "Returns a constant tensor."
+}
+op {
+ graph_op_name: "ControlTrigger"
+ endpoint {
+ name: "ControlTrigger"
+ }
+ summary: "Does nothing. Serves as a control trigger for scheduling."
+ description: <<END
+Only useful as a placeholder for control edges.
+END
+}
+op {
+ graph_op_name: "Conv2D"
+ endpoint {
+ name: "Conv2D"
+ }
+ summary: "Computes a 2-D convolution given 4-D `input` and `filter` tensors."
+ description: <<END
+Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
+and a filter / kernel tensor of shape
+`[filter_height, filter_width, in_channels, out_channels]`, this op
+performs the following:
+
+1. Flattens the filter to a 2-D matrix with shape
+ `[filter_height * filter_width * in_channels, output_channels]`.
+2. Extracts image patches from the input tensor to form a *virtual*
+ tensor of shape `[batch, out_height, out_width,
+ filter_height * filter_width * in_channels]`.
+3. For each patch, right-multiplies the filter matrix and the image patch
+ vector.
+
+In detail, with the default NHWC format,
+
+ output[b, i, j, k] =
+ sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
+ filter[di, dj, q, k]
+
+Must have `strides[0] = strides[3] = 1`. For the most common case of the same
+horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
+END
+}
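A minimal shape example (sketch, using the usual Python wrapper `tf.nn.conv2d`):

```python
import numpy as np
import tensorflow as tf

# NHWC input: one 5x5 RGB image; HWIO filter: 3x3 kernel, 3 in-channels, 8 out-channels.
images = tf.constant(np.random.rand(1, 5, 5, 3), dtype=tf.float32)
kernel = tf.constant(np.random.rand(3, 3, 3, 8), dtype=tf.float32)

same = tf.nn.conv2d(images, kernel, strides=[1, 1, 1, 1], padding='SAME')
valid = tf.nn.conv2d(images, kernel, strides=[1, 1, 1, 1], padding='VALID')

print(same.shape)   # (1, 5, 5, 8)  -- spatial size preserved by padding
print(valid.shape)  # (1, 3, 3, 8)  -- no padding: 5 - 3 + 1 = 3
```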
+op {
+ graph_op_name: "Conv2DBackpropFilter"
+ endpoint {
+ name: "Conv2DBackpropFilter"
+ }
+ summary: "Computes the gradients of convolution with respect to the filter."
+}
+op {
+ graph_op_name: "Conv2DBackpropInput"
+ endpoint {
+ name: "Conv2DBackpropInput"
+ }
+ summary: "Computes the gradients of convolution with respect to the input."
+}
+op {
+ graph_op_name: "Conv3D"
+ endpoint {
+ name: "Conv3D"
+ }
+ summary: "Computes a 3-D convolution given 5-D `input` and `filter` tensors."
+ description: <<END
+In signal processing, cross-correlation is a measure of similarity of
+two waveforms as a function of a time-lag applied to one of them. This
+is also known as a sliding dot product or sliding inner-product.
+
+Our Conv3D implements a form of cross-correlation.
+END
+}
+op {
+ graph_op_name: "Conv3DBackpropFilter"
+ endpoint {
+ name: "Conv3DBackpropFilter"
+ }
+ summary: "Computes the gradients of 3-D convolution with respect to the filter."
+}
+op {
+ graph_op_name: "Conv3DBackpropFilterV2"
+ endpoint {
+ name: "Conv3DBackpropFilterV2"
+ }
+ summary: "Computes the gradients of 3-D convolution with respect to the filter."
+}
+op {
+ graph_op_name: "Conv3DBackpropInput"
+ endpoint {
+ name: "Conv3DBackpropInput"
+ }
+ summary: "Computes the gradients of 3-D convolution with respect to the input."
+}
+op {
+ graph_op_name: "Conv3DBackpropInputV2"
+ endpoint {
+ name: "Conv3DBackpropInputV2"
+ }
+ summary: "Computes the gradients of 3-D convolution with respect to the input."
+}
+op {
+ graph_op_name: "Cos"
+ endpoint {
+ name: "Cos"
+ }
+ summary: "Computes cos of x element-wise."
+}
+op {
+ graph_op_name: "Cosh"
+ endpoint {
+ name: "Cosh"
+ }
+ summary: "Computes hyperbolic cosine of x element-wise."
+}
+op {
+ graph_op_name: "CountUpTo"
+ endpoint {
+ name: "CountUpTo"
+ }
+ summary: "Increments \'ref\' until it reaches \'limit\'."
+}
+op {
+ graph_op_name: "CropAndResize"
+ endpoint {
+ name: "CropAndResize"
+ }
+ summary: "Extracts crops from the input image tensor and bilinearly resizes them (possibly"
+ description: <<END
+with aspect ratio change) to a common output size specified by `crop_size`. This
+is more general than the `crop_to_bounding_box` op which extracts a fixed size
+slice from the input image and does not allow resizing or aspect ratio change.
+
+Returns a tensor with `crops` from the input `image` at positions defined at the
+bounding box locations in `boxes`. The cropped boxes are all resized (with
+bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The
+result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`.
+END
+}
+op {
+ graph_op_name: "CropAndResizeGradBoxes"
+ endpoint {
+ name: "CropAndResizeGradBoxes"
+ }
+ summary: "Computes the gradient of the crop_and_resize op wrt the input boxes tensor."
+}
+op {
+ graph_op_name: "CropAndResizeGradImage"
+ endpoint {
+ name: "CropAndResizeGradImage"
+ }
+ summary: "Computes the gradient of the crop_and_resize op wrt the input image tensor."
+}
+op {
+ graph_op_name: "Cross"
+ endpoint {
+ name: "Cross"
+ }
+ summary: "Compute the pairwise cross product."
+ description: <<END
+`a` and `b` must be the same shape; they can either be simple 3-element vectors,
+or any shape where the innermost dimension is 3. In the latter case, each pair
+of corresponding 3-element vectors is cross-multiplied independently.
+END
+}
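For instance, the standard basis vectors (sketch, assuming the Python endpoint `tf.cross`):

```python
import tensorflow as tf

a = tf.constant([[1., 0., 0.],
                 [0., 1., 0.]])
b = tf.constant([[0., 1., 0.],
                 [0., 0., 1.]])

c = tf.cross(a, b)  # cross product of each corresponding pair of 3-vectors

with tf.Session() as sess:
    print(sess.run(c))  # [[0. 0. 1.]
                        #  [1. 0. 0.]]
```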
+op {
+ graph_op_name: "Cumprod"
+ endpoint {
+ name: "Cumprod"
+ }
+ summary: "Compute the cumulative product of the tensor `x` along `axis`."
+ description: <<END
+By default, this op performs an inclusive cumprod, which means that the first
+element of the input is identical to the first element of the output:
+
+```python
+tf.cumprod([a, b, c]) # => [a, a * b, a * b * c]
+```
+
+By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
+performed instead:
+
+```python
+tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b]
+```
+
+By setting the `reverse` kwarg to `True`, the cumprod is performed in the
+opposite direction:
+
+```python
+tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c]
+```
+
+This is more efficient than using separate `tf.reverse` ops.
+
+The `reverse` and `exclusive` kwargs can also be combined:
+
+```python
+tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1]
+```
+END
+}
+op {
+ graph_op_name: "Cumsum"
+ endpoint {
+ name: "Cumsum"
+ }
+ summary: "Compute the cumulative sum of the tensor `x` along `axis`."
+ description: <<END
+By default, this op performs an inclusive cumsum, which means that the first
+element of the input is identical to the first element of the output:
+
+```python
+tf.cumsum([a, b, c]) # => [a, a + b, a + b + c]
+```
+
+By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
+performed instead:
+
+```python
+tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b]
+```
+
+By setting the `reverse` kwarg to `True`, the cumsum is performed in the
+opposite direction:
+
+```python
+tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c]
+```
+
+This is more efficient than using separate `tf.reverse` ops.
+
+The `reverse` and `exclusive` kwargs can also be combined:
+
+```python
+tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_D.pbtxt b/tensorflow/core/api_def/base_api/api_def_D.pbtxt
new file mode 100644
index 0000000000..ff8a7223c7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_D.pbtxt
@@ -0,0 +1,790 @@
+op {
+ graph_op_name: "DebugGradientIdentity"
+ endpoint {
+ name: "DebugGradientIdentity"
+ }
+ summary: "Identity op for gradient debugging."
+ description: <<END
+This op is hidden from public in Python. It is used by TensorFlow Debugger to
+register gradient tensors for gradient debugging.
+END
+}
+op {
+ graph_op_name: "DecodeAndCropJpeg"
+ endpoint {
+ name: "DecodeAndCropJpeg"
+ }
+ summary: "Decode and Crop a JPEG-encoded image to a uint8 tensor."
+ description: <<END
+The attr `channels` indicates the desired number of color channels for the
+decoded image.
+
+Accepted values are:
+
+* 0: Use the number of channels in the JPEG-encoded image.
+* 1: output a grayscale image.
+* 3: output an RGB image.
+
+If needed, the JPEG-encoded image is transformed to match the requested number
+of color channels.
+
+The attr `ratio` allows downscaling the image by an integer factor during
+decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than
+downscaling the image later.
+
+It is equivalent to a combination of decode and crop, but is much faster
+because it only decodes part of the JPEG image.
+END
+}
+op {
+ graph_op_name: "DecodeBase64"
+ endpoint {
+ name: "DecodeBase64"
+ }
+ summary: "Decode web-safe base64-encoded strings."
+ description: <<END
+Input may or may not have padding at the end. See EncodeBase64 for padding.
+Web-safe means that input must use - and _ instead of + and /.
+END
+}
+op {
+ graph_op_name: "DecodeBmp"
+ endpoint {
+ name: "DecodeBmp"
+ }
+ summary: "Decode the first frame of a BMP-encoded image to a uint8 tensor."
+ description: <<END
+The attr `channels` indicates the desired number of color channels for the
+decoded image.
+
+Accepted values are:
+
+* 0: Use the number of channels in the BMP-encoded image.
+* 3: output an RGB image.
+* 4: output an RGBA image.
+END
+}
+op {
+ graph_op_name: "DecodeCSV"
+ endpoint {
+ name: "DecodeCSV"
+ }
+ summary: "Convert CSV records to tensors. Each column maps to one tensor."
+ description: <<END
+RFC 4180 format is expected for the CSV records.
+(https://tools.ietf.org/html/rfc4180)
+Note that we allow leading and trailing spaces in int or float fields.
+END
+}
+op {
+ graph_op_name: "DecodeGif"
+ endpoint {
+ name: "DecodeGif"
+ }
+ summary: "Decode the first frame of a GIF-encoded image to a uint8 tensor."
+ description: <<END
+GIFs with frame or transparency compression are not supported; convert animated
+GIFs from compressed to uncompressed with:
+
+    convert $src.gif -coalesce $dst.gif
+
+This op also supports decoding JPEGs and PNGs, though it is cleaner to use
+`tf.image.decode_image`.
+END
+}
+op {
+ graph_op_name: "DecodeJSONExample"
+ endpoint {
+ name: "DecodeJSONExample"
+ }
+ summary: "Convert JSON-encoded Example records to binary protocol buffer strings."
+ description: <<END
+This op translates a tensor containing Example records, encoded using
+the [standard JSON
+mapping](https://developers.google.com/protocol-buffers/docs/proto3#json),
+into a tensor containing the same records encoded as binary protocol
+buffers. The resulting tensor can then be fed to any of the other
+Example-parsing ops.
+END
+}
+op {
+ graph_op_name: "DecodeJpeg"
+ endpoint {
+ name: "DecodeJpeg"
+ }
+ summary: "Decode a JPEG-encoded image to a uint8 tensor."
+ description: <<END
+The attr `channels` indicates the desired number of color channels for the
+decoded image.
+
+Accepted values are:
+
+* 0: Use the number of channels in the JPEG-encoded image.
+* 1: output a grayscale image.
+* 3: output an RGB image.
+
+If needed, the JPEG-encoded image is transformed to match the requested number
+of color channels.
+
+The attr `ratio` allows downscaling the image by an integer factor during
+decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than
+downscaling the image later.
+
+This op also supports decoding PNGs and non-animated GIFs since the interface is
+the same, though it is cleaner to use `tf.image.decode_image`.
+END
+}
+op {
+ graph_op_name: "DecodePng"
+ endpoint {
+ name: "DecodePng"
+ }
+ summary: "Decode a PNG-encoded image to a uint8 or uint16 tensor."
+ description: <<END
+The attr `channels` indicates the desired number of color channels for the
+decoded image.
+
+Accepted values are:
+
+* 0: Use the number of channels in the PNG-encoded image.
+* 1: output a grayscale image.
+* 3: output an RGB image.
+* 4: output an RGBA image.
+
+If needed, the PNG-encoded image is transformed to match the requested number
+of color channels.
+
+This op also supports decoding JPEGs and non-animated GIFs since the interface
+is the same, though it is cleaner to use `tf.image.decode_image`.
+END
+}
+op {
+ graph_op_name: "DecodeRaw"
+ endpoint {
+ name: "DecodeRaw"
+ }
+ summary: "Reinterpret the bytes of a string as a vector of numbers."
+}
+op {
+ graph_op_name: "DecodeWav"
+ endpoint {
+ name: "DecodeWav"
+ }
+ summary: "Decode a 16-bit PCM WAV file to a float tensor."
+ description: <<END
+The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
+
+When desired_channels is set, if the input contains fewer channels than
+requested, the last channel will be duplicated to give the requested number; if
+the input has more channels than requested, the additional channels will be
+ignored.
+
+If desired_samples is set, then the audio will be cropped or padded with zeroes
+to the requested length.
+
+The first output contains a Tensor with the content of the audio samples. Its
+first dimension is the number of samples and its second is the number of
+channels. For example, a ten-sample-long stereo WAV file will give an output
+shape of [10, 2].
+END
+}
+op {
+ graph_op_name: "DeleteSessionTensor"
+ endpoint {
+ name: "DeleteSessionTensor"
+ }
+ summary: "Delete the tensor specified by its handle in the session."
+}
+op {
+ graph_op_name: "DenseToDenseSetOperation"
+ endpoint {
+ name: "DenseToDenseSetOperation"
+ }
+ summary: "Applies set operation along last dimension of 2 `Tensor` inputs."
+ description: <<END
+See SetOperationOp::SetOperationFromContext for values of `set_operation`.
+
+Output `result` is a `SparseTensor` represented by `result_indices`,
+`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
+has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
+dimension contains the result of `set_operation` applied to the corresponding
+`[0...n-1]` dimension of `set`.
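+
+As intuition for the 2-D case with `set_operation = "intersection"`, a plain
+Python sketch of the row-wise behavior (the op returns the result as a
+`SparseTensor` rather than a ragged Python list):
+
+```python
+set1 = [[1, 2, 3], [4, 5, 6]]
+set2 = [[2, 3, 9], [6, 6, 7]]
+
+# The last dimension of each input is treated as a set; duplicates are ignored.
+result = [sorted(set(a) & set(b)) for a, b in zip(set1, set2)]
+print(result)  # => [[2, 3], [6]]
+```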
+END
+}
+op {
+ graph_op_name: "DenseToSparseBatchDataset"
+ endpoint {
+ name: "DenseToSparseBatchDataset"
+ }
+ summary: "Creates a dataset that yields a SparseTensor for each element of the input."
+}
+op {
+ graph_op_name: "DenseToSparseSetOperation"
+ endpoint {
+ name: "DenseToSparseSetOperation"
+ }
+ summary: "Applies set operation along last dimension of `Tensor` and `SparseTensor`."
+ description: <<END
+See SetOperationOp::SetOperationFromContext for values of `set_operation`.
+
+Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
+and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
+as `set1`. Dimension `n` contains values in a set; duplicates are allowed but
+ignored.
+
+If `validate_indices` is `True`, this op validates the order and range of `set2`
+indices.
+
+Output `result` is a `SparseTensor` represented by `result_indices`,
+`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
+has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
+dimension contains the result of `set_operation` applied to the corresponding
+`[0...n-1]` dimension of `set`.
+END
+}
+op {
+ graph_op_name: "DepthToSpace"
+ endpoint {
+ name: "DepthToSpace"
+ }
+ summary: "DepthToSpace for tensors of type T."
+ description: <<END
+Rearranges data from depth into blocks of spatial data.
+This is the reverse transformation of SpaceToDepth. More specifically,
+this op outputs a copy of the input tensor where values from the `depth`
+dimension are moved in spatial blocks to the `height` and `width` dimensions.
+The attr `block_size` indicates the input block size and how the data is moved.
+
+ * Chunks of data of size `block_size * block_size` from depth are rearranged
+ into non-overlapping blocks of size `block_size x block_size`
+ * The width of the output tensor is `input_width * block_size`, whereas the
+ height is `input_height * block_size`.
+ * The Y, X coordinates within each block of the output image are determined
+ by the high order component of the input channel index.
+ * The depth of the input tensor must be divisible by
+ `block_size * block_size`.
+
+The `data_format` attr specifies the layout of the input and output tensors
+with the following options:
+ "NHWC": `[ batch, height, width, channels ]`
+ "NCHW": `[ batch, channels, height, width ]`
+ "NCHW_VECT_C":
+ `qint8 [ batch, channels / 4, height, width, channels % 4 ]`
+
+It is useful to consider the operation as transforming a 6-D Tensor.
+e.g. for data_format = NHWC,
+ Each element in the input tensor can be specified via 6 coordinates,
+ ordered by decreasing memory layout significance as:
+ n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates
+ within the input image, bX, bY means coordinates
+ within the output block, oC means output channels).
+ The output would be the input transposed to the following layout:
+ n,iY,bY,iX,bX,oC
+
+This operation is useful for resizing the activations between convolutions
+(but keeping all data), e.g. instead of pooling. It is also useful for training
+purely convolutional models.
+
+For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
+block_size = 2:
+
+```
+x = [[[[1, 2, 3, 4]]]]
+
+```
+
+This operation will output a tensor of shape `[1, 2, 2, 1]`:
+
+```
+ [[[[1], [2]],
+ [[3], [4]]]]
+```
+
+Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
+the corresponding output will have 2x2 elements and will have a depth of
+1 channel (1 = `4 / (block_size * block_size)`).
+The output element shape is `[2, 2, 1]`.
+
+For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
+
+```
+x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
+```
+
+This operation, for block size of 2, will return the following tensor of shape
+`[1, 2, 2, 3]`
+
+```
+ [[[[1, 2, 3], [4, 5, 6]],
+ [[7, 8, 9], [10, 11, 12]]]]
+
+```
+
+Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:
+
+```
+x = [[[[1, 2, 3, 4],
+ [5, 6, 7, 8]],
+ [[9, 10, 11, 12],
+ [13, 14, 15, 16]]]]
+```
+
+the operator will return the following tensor of shape `[1 4 4 1]`:
+
+```
+x = [[[ [1], [2], [5], [6]],
+ [ [3], [4], [7], [8]],
+ [ [9], [10], [13], [14]],
+ [ [11], [12], [15], [16]]]]
+
+```
+END
+}
+op {
+ graph_op_name: "DepthwiseConv2dNative"
+ endpoint {
+ name: "DepthwiseConv2dNative"
+ }
+ summary: "Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors."
+ description: <<END
+Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
+and a filter / kernel tensor of shape
+`[filter_height, filter_width, in_channels, channel_multiplier]`, containing
+`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
+a different filter to each input channel (expanding from 1 channel to
+`channel_multiplier` channels for each), then concatenates the results
+together. Thus, the output has `in_channels * channel_multiplier` channels.
+
+```
+for k in 0..in_channels-1
+ for q in 0..channel_multiplier-1
+ output[b, i, j, k * channel_multiplier + q] =
+ sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+ filter[di, dj, k, q]
+```
+
+Must have `strides[0] = strides[3] = 1`. For the most common case of the same
+horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
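+
+A direct NumPy transcription of the loop above, for unit strides and `VALID`
+padding, may help make the channel layout concrete (an illustrative sketch, not
+the kernel implementation):
+
+```python
+import numpy as np
+
+def depthwise_conv2d_valid(inp, filt):
+  # inp:  [batch, in_height, in_width, in_channels]
+  # filt: [filter_height, filter_width, in_channels, channel_multiplier]
+  fh, fw, c, m = filt.shape
+  b = inp.shape[0]
+  out_h = inp.shape[1] - fh + 1
+  out_w = inp.shape[2] - fw + 1
+  out = np.zeros((b, out_h, out_w, c * m), inp.dtype)
+  for k in range(c):
+    for q in range(m):
+      for i in range(out_h):
+        for j in range(out_w):
+          patch = inp[:, i:i + fh, j:j + fw, k]
+          out[:, i, j, k * m + q] = np.sum(patch * filt[:, :, k, q], axis=(1, 2))
+  return out
+
+inp = np.random.rand(1, 4, 4, 3)
+filt = np.random.rand(2, 2, 3, 2)
+print(depthwise_conv2d_valid(inp, filt).shape)  # => (1, 3, 3, 6)
+```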
+END
+}
+op {
+ graph_op_name: "DepthwiseConv2dNativeBackpropFilter"
+ endpoint {
+ name: "DepthwiseConv2dNativeBackpropFilter"
+ }
+ summary: "Computes the gradients of depthwise convolution with respect to the filter."
+}
+op {
+ graph_op_name: "DepthwiseConv2dNativeBackpropInput"
+ endpoint {
+ name: "DepthwiseConv2dNativeBackpropInput"
+ }
+ summary: "Computes the gradients of depthwise convolution with respect to the input."
+}
+op {
+ graph_op_name: "Dequantize"
+ endpoint {
+ name: "Dequantize"
+ }
+ summary: "Dequantize the \'input\' tensor into a float Tensor."
+ description: <<END
+[min_range, max_range] are scalar floats that specify the range for
+the 'input' data. The 'mode' attribute controls exactly which calculations are
+used to convert the quantized values to their float equivalents.
+
+In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
+
+```
+if T == qint8, in[i] += (range(T) + 1)/ 2.0
+out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
+```
+where `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`.
+
+*MIN_COMBINED Mode Example*
+
+If the input comes from a QuantizedRelu6, the output type is
+quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
+0-6. The min_range and max_range values are therefore 0.0 and 6.0.
+Dequantize on quint8 will take each value, cast to float, and multiply
+by 6 / 255.
+Note that if the quantized type is qint8, the operation will additionally add
+128 to each value prior to casting.
+
+If the mode is 'MIN_FIRST', then this approach is used:
+
+```c++
+number_of_steps = 1 << (# of bits in T)
+range_adjust = number_of_steps / (number_of_steps - 1)
+range = (range_max - range_min) * range_adjust
+range_scale = range / number_of_steps
+const double offset_input = static_cast<double>(input) - lowest_quantized;
+result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
+```
+
+*SCALED mode Example*
+
+`SCALED` mode matches the quantization approach used in
+`QuantizeAndDequantize{V2|V3}`.
+
+If the mode is `SCALED`, we do not use the full range of the output type,
+choosing to elide the lowest possible value for symmetry (e.g., output range is
+-127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
+0.
+
+We first find the range of values in our tensor. The
+range we use is always centered on 0, so we find m such that
+```c++
+ m = max(abs(input_min), abs(input_max))
+```
+
+Our input tensor range is then `[-m, m]`.
+
+Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
+If T is signed, this is
+```
+ num_bits = sizeof(T) * 8
+ [min_fixed, max_fixed] =
+ [-(1 << (num_bits - 1)) + 1, (1 << (num_bits - 1)) - 1]
+```
+
+Otherwise, if T is unsigned, the fixed-point range is
+```
+ [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
+```
+
+From this we compute our scaling factor, s:
+```c++
+ s = (2 * m) / (max_fixed - min_fixed)
+```
+
+Now we can dequantize the elements of our tensor:
+```c++
+result = input * s
+```
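+
+A minimal NumPy sketch of the `SCALED` path for signed 8-bit data, under the
+symmetric-range convention described above (illustrative only, not the kernel):
+
+```python
+import numpy as np
+
+def dequantize_scaled(q, input_min, input_max, num_bits=8):
+  m = max(abs(input_min), abs(input_max))
+  min_fixed = -((1 << (num_bits - 1)) - 1)  # -127 for 8 bits
+  max_fixed = (1 << (num_bits - 1)) - 1     #  127 for 8 bits
+  s = (2.0 * m) / (max_fixed - min_fixed)
+  return q.astype(np.float32) * s
+
+q = np.array([-127, 0, 64, 127], dtype=np.int8)
+print(dequantize_scaled(q, input_min=-1.0, input_max=1.0))
+# => approximately [-1.0, 0.0, 0.504, 1.0]
+```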
+END
+}
+op {
+ graph_op_name: "DeserializeManySparse"
+ endpoint {
+ name: "DeserializeManySparse"
+ }
+ summary: "Deserialize and concatenate `SparseTensors` from a serialized minibatch."
+ description: <<END
+The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
+`N` is the minibatch size and the rows correspond to packed outputs of
+`SerializeSparse`. The ranks of the original `SparseTensor` objects
+must all match. When the final `SparseTensor` is created, it has rank one
+higher than the ranks of the incoming `SparseTensor` objects
+(they have been concatenated along a new row dimension).
+
+The output `SparseTensor` object's shape values for all dimensions but the
+first are the max across the input `SparseTensor` objects' shape values
+for the corresponding dimensions. Its first shape value is `N`, the minibatch
+size.
+
+The input `SparseTensor` objects' indices are assumed ordered in
+standard lexicographic order. If this is not the case, after this
+step run `SparseReorder` to restore index ordering.
+
+For example, if the serialized input is a `[2 x 3]` matrix representing two
+original `SparseTensor` objects:
+
+ index = [ 0]
+ [10]
+ [20]
+ values = [1, 2, 3]
+ shape = [50]
+
+and
+
+ index = [ 2]
+ [10]
+ values = [4, 5]
+ shape = [30]
+
+then the final deserialized `SparseTensor` will be:
+
+ index = [0 0]
+ [0 10]
+ [0 20]
+ [1 2]
+ [1 10]
+ values = [1, 2, 3, 4, 5]
+ shape = [2 50]
+END
+}
+op {
+ graph_op_name: "DestroyTemporaryVariable"
+ endpoint {
+ name: "DestroyTemporaryVariable"
+ }
+ summary: "Destroys the temporary variable and returns its final value."
+ description: <<END
+Sets output to the value of the Tensor pointed to by 'ref', then destroys
+the temporary variable called 'var_name'.
+All other uses of 'ref' *must* have executed before this op.
+This is typically achieved by chaining the ref through each assign op, or by
+using control dependencies.
+
+Outputs the final value of the tensor pointed to by 'ref'.
+END
+}
+op {
+ graph_op_name: "Diag"
+ endpoint {
+ name: "Diag"
+ }
+ summary: "Returns a diagonal tensor with a given diagonal values."
+ description: <<END
+Given a `diagonal`, this operation returns a tensor with the `diagonal` and
+everything else padded with zeros. The diagonal is computed as follows:
+
+Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
+rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
+
+`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
+
+For example:
+
+```
+# 'diagonal' is [1, 2, 3, 4]
+tf.diag(diagonal) ==> [[1, 0, 0, 0]
+ [0, 2, 0, 0]
+ [0, 0, 3, 0]
+ [0, 0, 0, 4]]
+```
+END
+}
+op {
+ graph_op_name: "DiagPart"
+ endpoint {
+ name: "DiagPart"
+ }
+ summary: "Returns the diagonal part of the tensor."
+ description: <<END
+This operation returns a tensor with the `diagonal` part
+of the `input`. The `diagonal` part is computed as follows:
+
+Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
+tensor of rank `k` with dimensions `[D1,..., Dk]` where:
+
+`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
+
+For example:
+
+```
+# 'input' is [[1, 0, 0, 0]
+ [0, 2, 0, 0]
+ [0, 0, 3, 0]
+ [0, 0, 0, 4]]
+
+tf.diag_part(input) ==> [1, 2, 3, 4]
+```
+END
+}
+op {
+ graph_op_name: "Digamma"
+ endpoint {
+ name: "Digamma"
+ }
+ summary: "Computes Psi, the derivative of Lgamma (the log of the absolute value of"
+ description: <<END
+`Gamma(x)`), element-wise.
+END
+}
+op {
+ graph_op_name: "Dilation2D"
+ endpoint {
+ name: "Dilation2D"
+ }
+ summary: "Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors."
+ description: <<END
+The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
+`filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
+input channel is processed independently of the others with its own structuring
+function. The `output` tensor has shape
+`[batch, out_height, out_width, depth]`. The spatial dimensions of the output
+tensor depend on the `padding` algorithm. We currently only support the default
+"NHWC" `data_format`.
+
+In detail, the grayscale morphological 2-D dilation is the max-sum correlation
+(for consistency with `conv2d`, we use unmirrored filters):
+
+ output[b, y, x, c] =
+ max_{dy, dx} input[b,
+ strides[1] * y + rates[1] * dy,
+ strides[2] * x + rates[2] * dx,
+ c] +
+ filter[dy, dx, c]
+
+Max-pooling is a special case when the filter has size equal to the pooling
+kernel size and contains all zeros.
+
+Note on duality: The dilation of `input` by the `filter` is equal to the
+negation of the erosion of `-input` by the reflected `filter`.
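+
+A small NumPy reference sketch of the max-sum correlation above, for a single
+image with unit strides and rates and `VALID` padding (illustrative only, not
+the kernel implementation):
+
+```python
+import numpy as np
+
+def dilation2d_valid(image, filt):
+  # image: [in_height, in_width, depth], filt: [filter_height, filter_width, depth]
+  fh, fw, depth = filt.shape
+  out_h = image.shape[0] - fh + 1
+  out_w = image.shape[1] - fw + 1
+  out = np.empty((out_h, out_w, depth), image.dtype)
+  for y in range(out_h):
+    for x in range(out_w):
+      for c in range(depth):
+        window = image[y:y + fh, x:x + fw, c]
+        out[y, x, c] = np.max(window + filt[:, :, c])
+  return out
+
+image = np.arange(16, dtype=np.float32).reshape(4, 4, 1)
+filt = np.zeros((2, 2, 1), dtype=np.float32)
+# With an all-zero filter each output is the max of its window, matching the
+# max-pooling special case noted above (here with stride 1).
+print(dilation2d_valid(image, filt)[:, :, 0])
+```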
+END
+}
+op {
+ graph_op_name: "Dilation2DBackpropFilter"
+ endpoint {
+ name: "Dilation2DBackpropFilter"
+ }
+ summary: "Computes the gradient of morphological 2-D dilation with respect to the filter."
+}
+op {
+ graph_op_name: "Dilation2DBackpropInput"
+ endpoint {
+ name: "Dilation2DBackpropInput"
+ }
+ summary: "Computes the gradient of morphological 2-D dilation with respect to the input."
+}
+op {
+ graph_op_name: "Div"
+ endpoint {
+ name: "Div"
+ }
+ summary: "Returns x / y element-wise."
+ description: <<END
+*NOTE*: `Div` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "DrawBoundingBoxes"
+ endpoint {
+ name: "DrawBoundingBoxes"
+ }
+ summary: "Draw bounding boxes on a batch of images."
+ description: <<END
+Outputs a copy of `images` but draws on top of the pixels zero or more bounding
+boxes specified by the locations in `boxes`. The coordinates of each
+bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
+bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
+height of the underlying image.
+
+For example, if an image is 100 x 200 pixels (height x width) and the bounding
+box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
+the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
+
+Parts of the bounding box may fall outside the image.
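+
+The pixel coordinates in the example above follow directly from the
+`[y_min, x_min, y_max, x_max]` layout (a quick sketch of the arithmetic):
+
+```python
+height, width = 100, 200  # image is height x width
+y_min, x_min, y_max, x_max = 0.1, 0.2, 0.5, 0.9
+
+upper_left = (x_min * width, y_min * height)   # => (40.0, 10.0)
+lower_right = (x_max * width, y_max * height)  # => (180.0, 50.0)
+```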
+END
+}
+op {
+ graph_op_name: "DynamicPartition"
+ endpoint {
+ name: "DynamicPartition"
+ }
+ summary: "Partitions `data` into `num_partitions` tensors using indices from `partitions`."
+ description: <<END
+For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
+becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`
+are placed in `outputs[i]` in lexicographic order of `js`, and the first
+dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
+In detail,
+
+```python
+ outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
+
+ outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
+```
+
+`data.shape` must start with `partitions.shape`.
+
+For example:
+
+```python
+ # Scalar partitions.
+ partitions = 1
+ num_partitions = 2
+ data = [10, 20]
+ outputs[0] = [] # Empty with shape [0, 2]
+ outputs[1] = [[10, 20]]
+
+ # Vector partitions.
+ partitions = [0, 0, 1, 1, 0]
+ num_partitions = 2
+ data = [10, 20, 30, 40, 50]
+ outputs[0] = [10, 20, 50]
+ outputs[1] = [30, 40]
+```
+
+See `dynamic_stitch` for an example on how to merge partitions back.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
+</div>
+END
+}
+op {
+ graph_op_name: "DynamicStitch"
+ endpoint {
+ name: "DynamicStitch"
+ }
+ summary: "Interleave the values from the `data` tensors into a single tensor."
+ description: <<END
+Builds a merged tensor such that
+
+```python
+ merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
+```
+
+For example, if each `indices[m]` is scalar or vector, we have
+
+```python
+ # Scalar indices:
+ merged[indices[m], ...] = data[m][...]
+
+ # Vector indices:
+ merged[indices[m][i], ...] = data[m][i, ...]
+```
+
+Each `data[i].shape` must start with the corresponding `indices[i].shape`,
+and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
+must have `data[i].shape = indices[i].shape + constant`. In terms of this
+`constant`, the output shape is
+
+ merged.shape = [max(indices) + 1] + constant
+
+Values are merged in order, so if an index appears in both `indices[m][i]` and
+`indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
+merged result. If you do not need this guarantee, ParallelDynamicStitch might
+perform better on some devices.
+
+For example:
+
+```python
+ indices[0] = 6
+ indices[1] = [4, 1]
+ indices[2] = [[5, 2], [0, 3]]
+ data[0] = [61, 62]
+ data[1] = [[41, 42], [11, 12]]
+ data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
+ merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
+ [51, 52], [61, 62]]
+```
+
+This method can be used to merge partitions created by `dynamic_partition`
+as illustrated in the following example:
+
+```python
+  # Apply a function (increment x_i) to elements for which a certain condition
+  # applies (x_i != -1 in this example).
+  x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
+  condition_mask = tf.not_equal(x, tf.constant(-1.))
+  partitioned_data = tf.dynamic_partition(
+      x, tf.cast(condition_mask, tf.int32), 2)
+  partitioned_data[1] = partitioned_data[1] + 1.0
+  condition_indices = tf.dynamic_partition(
+      tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
+  x = tf.dynamic_stitch(condition_indices, partitioned_data)
+ # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
+ # unchanged.
+```
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
+</div>
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_E.pbtxt b/tensorflow/core/api_def/base_api/api_def_E.pbtxt
new file mode 100644
index 0000000000..b49146f7c4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_E.pbtxt
@@ -0,0 +1,261 @@
+op {
+ graph_op_name: "EditDistance"
+ endpoint {
+ name: "EditDistance"
+ }
+ summary: "Computes the (possibly normalized) Levenshtein Edit Distance."
+ description: <<END
+The inputs are variable-length sequences provided by SparseTensors
+ (hypothesis_indices, hypothesis_values, hypothesis_shape)
+and
+ (truth_indices, truth_values, truth_shape).
+
+The inputs are:
+END
+}
+op {
+ graph_op_name: "Elu"
+ endpoint {
+ name: "Elu"
+ }
+ summary: "Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise."
+ description: <<END
+See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
+](http://arxiv.org/abs/1511.07289)
+END
+}
+op {
+ graph_op_name: "EluGrad"
+ endpoint {
+ name: "EluGrad"
+ }
+ summary: "Computes gradients for the exponential linear (Elu) operation."
+}
+op {
+ graph_op_name: "EncodeBase64"
+ endpoint {
+ name: "EncodeBase64"
+ }
+ summary: "Encode strings into web-safe base64 format."
+ description: <<END
+Refer to the following article for more information on base64 format:
+en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the
+end so that the encoded string has a length that is a multiple of 4. See the
+Padding section of the link above.
+
+Web-safe means that the encoder uses - and _ instead of + and /.
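+
+For intuition, Python's standard library exposes the same web-safe alphabet (a
+sketch for comparison only; it always emits '=' padding, whereas padding is
+optional here):
+
+```python
+import base64
+
+data = b"\xfb\xff"
+print(base64.b64encode(data))          # => b'+/8='  (standard alphabet)
+print(base64.urlsafe_b64encode(data))  # => b'-_8='  (web-safe '-' and '_')
+```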
+END
+}
+op {
+ graph_op_name: "EncodeJpeg"
+ endpoint {
+ name: "EncodeJpeg"
+ }
+ summary: "JPEG-encode an image."
+ description: <<END
+`image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
+
+The attr `format` can be used to override the color format of the encoded
+output. Values can be:
+
+* `''`: Use a default format based on the number of channels in the image.
+* `grayscale`: Output a grayscale JPEG image. The `channels` dimension
+ of `image` must be 1.
+* `rgb`: Output an RGB JPEG image. The `channels` dimension
+ of `image` must be 3.
+
+If `format` is not specified or is the empty string, a default format is picked
+based on the number of channels in `image`:
+
+* 1: Output a grayscale image.
+* 3: Output an RGB image.
+END
+}
+op {
+ graph_op_name: "EncodePng"
+ endpoint {
+ name: "EncodePng"
+ }
+ summary: "PNG-encode an image."
+ description: <<END
+`image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
+where `channels` is:
+
+* 1: for grayscale.
+* 2: for grayscale + alpha.
+* 3: for RGB.
+* 4: for RGBA.
+
+The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
+default or a value from 0 to 9. 9 is the highest compression level, generating
+the smallest output, but is slower.
+END
+}
+op {
+ graph_op_name: "EncodeWav"
+ endpoint {
+ name: "EncodeWav"
+ }
+ summary: "Encode audio data using the WAV file format."
+ description: <<END
+This operation will generate a string suitable to be saved out to create a .wav
+audio file. It will be encoded in the 16-bit PCM format. It takes in float
+values in the range -1.0f to 1.0f, and any values outside that range will be
+clamped to it.
+
+`audio` is a 2-D float Tensor of shape `[length, channels]`.
+`sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
+END
+}
+op {
+ graph_op_name: "Enter"
+ endpoint {
+ name: "Enter"
+ }
+ summary: "Creates or finds a child frame, and makes `data` available to the child frame."
+ description: <<END
+This op is used together with `Exit` to create loops in the graph.
+The unique `frame_name` is used by the `Executor` to identify frames. If
+`is_constant` is true, `output` is a constant in the child frame; otherwise
+it may be changed in the child frame. At most `parallel_iterations` iterations
+are run in parallel in the child frame.
+END
+}
+op {
+ graph_op_name: "Equal"
+ endpoint {
+ name: "Equal"
+ }
+ summary: "Returns the truth value of (x == y) element-wise."
+ description: <<END
+*NOTE*: `Equal` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "Erf"
+ endpoint {
+ name: "Erf"
+ }
+ summary: "Computes the Gauss error function of `x` element-wise."
+}
+op {
+ graph_op_name: "Erfc"
+ endpoint {
+ name: "Erfc"
+ }
+ summary: "Computes the complementary error function of `x` element-wise."
+}
+op {
+ graph_op_name: "Exit"
+ endpoint {
+ name: "Exit"
+ }
+ summary: "Exits the current frame to its parent frame."
+ description: <<END
+Exit makes its input `data` available to the parent frame.
+END
+}
+op {
+ graph_op_name: "Exp"
+ endpoint {
+ name: "Exp"
+ }
+ summary: "Computes exponential of x element-wise. \\\\(y = e^x\\\\)."
+}
+op {
+ graph_op_name: "ExpandDims"
+ endpoint {
+ name: "ExpandDims"
+ }
+ summary: "Inserts a dimension of 1 into a tensor\'s shape."
+ description: <<END
+Given a tensor `input`, this operation inserts a dimension of 1 at the
+dimension index `dim` of `input`'s shape. The dimension index `dim` starts at
+zero; if you specify a negative number for `dim` it is counted backward from
+the end.
+
+This operation is useful if you want to add a batch dimension to a single
+element. For example, if you have a single image of shape `[height, width,
+channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
+which will make the shape `[1, height, width, channels]`.
+
+Other examples:
+
+```
+# 't' is a tensor of shape [2]
+shape(expand_dims(t, 0)) ==> [1, 2]
+shape(expand_dims(t, 1)) ==> [2, 1]
+shape(expand_dims(t, -1)) ==> [2, 1]
+
+# 't2' is a tensor of shape [2, 3, 5]
+shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
+shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
+shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
+```
+
+This operation requires that:
+
+`-1-input.dims() <= dim <= input.dims()`
+
+This operation is related to `squeeze()`, which removes dimensions of
+size 1.
+END
+}
+op {
+ graph_op_name: "Expm1"
+ endpoint {
+ name: "Expm1"
+ }
+ summary: "Computes exponential of x - 1 element-wise."
+ description: <<END
+I.e., \\(y = (\exp x) - 1\\).
+END
+}
+op {
+ graph_op_name: "ExtractGlimpse"
+ endpoint {
+ name: "ExtractGlimpse"
+ }
+ summary: "Extracts a glimpse from the input tensor."
+ description: <<END
+Returns a set of windows called glimpses extracted at location
+`offsets` from the input tensor. If the windows only partially
+overlap the input, the non-overlapping areas will be filled with
+random noise.
+
+The result is a 4-D tensor of shape `[batch_size, glimpse_height,
+glimpse_width, channels]`. The channels and batch dimensions are the
+same as those of the input tensor. The height and width of the output
+windows are specified in the `size` parameter.
+
+The arguments `normalized` and `centered` control how the windows are built:
+
+* If the coordinates are normalized but not centered, 0.0 and 1.0
+ correspond to the minimum and maximum of each height and width
+ dimension.
+* If the coordinates are both normalized and centered, they range from
+ -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
+ left corner, the lower right corner is located at (1.0, 1.0) and the
+ center is at (0, 0).
+* If the coordinates are not normalized they are interpreted as
+ numbers of pixels.
+END
+}
+op {
+ graph_op_name: "ExtractImagePatches"
+ endpoint {
+ name: "ExtractImagePatches"
+ }
+ summary: "Extract `patches` from `images` and put them in the \"depth\" output dimension."
+}
+op {
+ graph_op_name: "ExtractJpegShape"
+ endpoint {
+ name: "ExtractJpegShape"
+ }
+ summary: "Extract the shape information of a JPEG-encoded image."
+ description: <<END
+This op only parses the image header, so it is much faster than DecodeJpeg.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_F.pbtxt b/tensorflow/core/api_def/base_api/api_def_F.pbtxt
new file mode 100644
index 0000000000..8c073d3369
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_F.pbtxt
@@ -0,0 +1,411 @@
+op {
+ graph_op_name: "FFT"
+ endpoint {
+ name: "FFT"
+ }
+ summary: "Fast Fourier transform."
+ description: <<END
+Computes the 1-dimensional discrete Fourier transform over the inner-most
+dimension of `input`.
+END
+}
+op {
+ graph_op_name: "FFT2D"
+ endpoint {
+ name: "FFT2D"
+ }
+ summary: "2D fast Fourier transform."
+ description: <<END
+Computes the 2-dimensional discrete Fourier transform over the inner-most
+2 dimensions of `input`.
+END
+}
+op {
+ graph_op_name: "FFT3D"
+ endpoint {
+ name: "FFT3D"
+ }
+ summary: "3D fast Fourier transform."
+ description: <<END
+Computes the 3-dimensional discrete Fourier transform over the inner-most 3
+dimensions of `input`.
+END
+}
+op {
+ graph_op_name: "FIFOQueue"
+ endpoint {
+ name: "FIFOQueue"
+ }
+ summary: "A queue that produces elements in first-in first-out order."
+}
+op {
+ graph_op_name: "FIFOQueueV2"
+ endpoint {
+ name: "FIFOQueueV2"
+ }
+ summary: "A queue that produces elements in first-in first-out order."
+}
+op {
+ graph_op_name: "Fact"
+ endpoint {
+ name: "Fact"
+ }
+ summary: "Output a fact about factorials."
+}
+op {
+ graph_op_name: "FakeQuantWithMinMaxArgs"
+ endpoint {
+ name: "FakeQuantWithMinMaxArgs"
+ }
+ summary: "Fake-quantize the \'inputs\' tensor, type float to \'outputs\' tensor of same type."
+ description: <<END
+Attributes `[min; max]` define the clamping range for the `inputs` data.
+`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
+when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
+then de-quantized and output as floats in `[min; max]` interval.
+`num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
+
+Quantization is called fake since the output is still in floating point.
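+
+A rough NumPy sketch of the clamp / quantize / de-quantize round trip described
+above, ignoring implementation details such as how the range is nudged so that
+zero is exactly representable (illustrative only):
+
+```python
+import numpy as np
+
+def fake_quant(x, qmin=-6.0, qmax=6.0, num_bits=8):
+  steps = (1 << num_bits) - 1          # 255 quantization steps for 8 bits
+  scale = (qmax - qmin) / steps
+  clamped = np.clip(x, qmin, qmax)
+  return np.round((clamped - qmin) / scale) * scale + qmin
+
+x = np.array([-10.0, -1.0, 0.0, 2.5, 10.0])
+print(fake_quant(x))  # values clamped to [-6, 6] and snapped to the grid
+```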
+END
+}
+op {
+ graph_op_name: "FakeQuantWithMinMaxArgsGradient"
+ endpoint {
+ name: "FakeQuantWithMinMaxArgsGradient"
+ }
+ summary: "Compute gradients for a FakeQuantWithMinMaxArgs operation."
+}
+op {
+ graph_op_name: "FakeQuantWithMinMaxVars"
+ endpoint {
+ name: "FakeQuantWithMinMaxVars"
+ }
+ summary: "Fake-quantize the \'inputs\' tensor of type float via global float scalars `min`"
+ description: <<END
+and `max` to 'outputs' tensor of same shape as `inputs`.
+
+`[min; max]` define the clamping range for the `inputs` data.
+`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
+when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
+then de-quantized and output as floats in `[min; max]` interval.
+`num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
+
+This operation has a gradient and thus allows for training `min` and `max`
+values.
+END
+}
+op {
+ graph_op_name: "FakeQuantWithMinMaxVarsGradient"
+ endpoint {
+ name: "FakeQuantWithMinMaxVarsGradient"
+ }
+ summary: "Compute gradients for a FakeQuantWithMinMaxVars operation."
+}
+op {
+ graph_op_name: "FakeQuantWithMinMaxVarsPerChannel"
+ endpoint {
+ name: "FakeQuantWithMinMaxVarsPerChannel"
+ }
+ summary: "Fake-quantize the \'inputs\' tensor of type float and one of the shapes: `[d]`,"
+ description: <<END
+`[b, d]`, or `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`
+to 'outputs' tensor of same shape as `inputs`.
+
+`[min; max]` define the clamping range for the `inputs` data.
+`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
+when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
+then de-quantized and output as floats in `[min; max]` interval.
+`num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
+
+This operation has a gradient and thus allows for training `min` and `max`
+values.
+END
+}
+op {
+ graph_op_name: "FakeQuantWithMinMaxVarsPerChannelGradient"
+ endpoint {
+ name: "FakeQuantWithMinMaxVarsPerChannelGradient"
+ }
+ summary: "Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation."
+}
+op {
+ graph_op_name: "FakeQueue"
+ endpoint {
+ name: "FakeQueue"
+ }
+ summary: "Deprecated. Do not use."
+}
+op {
+ graph_op_name: "Fill"
+ endpoint {
+ name: "Fill"
+ }
+ summary: "Creates a tensor filled with a scalar value."
+ description: <<END
+This operation creates a tensor of shape `dims` and fills it with `value`.
+
+For example:
+
+```
+# Output tensor has shape [2, 3].
+fill([2, 3], 9) ==> [[9, 9, 9]
+ [9, 9, 9]]
+```
+END
+}
+op {
+ graph_op_name: "FilterDataset"
+ endpoint {
+ name: "FilterDataset"
+ }
+ summary: "Creates a dataset containing elements of `input_dataset` matching `predicate`."
+ description: <<END
+The `predicate` function must return a scalar boolean and accept the
+following arguments:
+
+* One tensor for each component of an element of `input_dataset`.
+* One tensor for each value in `other_arguments`.
+END
+}
+op {
+ graph_op_name: "FixedLengthRecordDataset"
+ endpoint {
+ name: "FixedLengthRecordDataset"
+ }
+ summary: "Creates a dataset that emits the records from one or more binary files."
+}
+op {
+ graph_op_name: "FixedLengthRecordReader"
+ endpoint {
+ name: "FixedLengthRecordReader"
+ }
+ summary: "A Reader that outputs fixed-length records from a file."
+}
+op {
+ graph_op_name: "FixedLengthRecordReaderV2"
+ endpoint {
+ name: "FixedLengthRecordReaderV2"
+ }
+ summary: "A Reader that outputs fixed-length records from a file."
+}
+op {
+ graph_op_name: "FixedUnigramCandidateSampler"
+ endpoint {
+ name: "FixedUnigramCandidateSampler"
+ }
+ summary: "Generates labels for candidate sampling with a learned unigram distribution."
+ description: <<END
+A unigram sampler could use a fixed unigram distribution read from a
+file or passed in as an in-memory array instead of building up the distribution
+from data on the fly. There is also an option to skew the distribution by
+applying a distortion power to the weights.
+
+The vocabulary file should be in CSV-like format, with the last field
+being the weight associated with the word.
+
+For each batch, this op picks a single set of sampled candidate labels.
+
+The advantages of sampling candidates per-batch are simplicity and the
+possibility of efficient dense matrix multiplication. The disadvantage is that
+the sampled candidates must be chosen independently of the context and of the
+true labels.
+END
+}
+op {
+ graph_op_name: "FlatMapDataset"
+ endpoint {
+ name: "FlatMapDataset"
+ }
+ summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
+ description: <<END
+Unlike MapDataset, the `f` in FlatMapDataset is expected to return a
+Dataset variant, and FlatMapDataset will flatten successive results
+into a single Dataset.
+END
+}
+op {
+ graph_op_name: "Floor"
+ endpoint {
+ name: "Floor"
+ }
+ summary: "Returns element-wise largest integer not greater than x."
+}
+op {
+ graph_op_name: "FloorDiv"
+ endpoint {
+ name: "FloorDiv"
+ }
+ summary: "Returns x // y element-wise."
+ description: <<END
+*NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "FloorMod"
+ endpoint {
+ name: "FloorMod"
+ }
+ summary: "Returns element-wise remainder of division. When `x < 0` xor `y < 0` is"
+ description: <<END
+true, this follows Python semantics in that the result here is consistent
+with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
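+
+For example, Python's own `%` follows the same flooring semantics:
+
+```python
+print(7 % 3)   # => 1
+print(-7 % 3)  # => 2   since floor(-7 / 3) * 3 + 2 == -7
+print(7 % -3)  # => -2  since floor(7 / -3) * -3 + -2 == 7
+```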
+
+*NOTE*: `FloorMod` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "FractionalAvgPool"
+ endpoint {
+ name: "FractionalAvgPool"
+ }
+ summary: "Performs fractional average pooling on the input."
+ description: <<END
+Fractional average pooling is similar to Fractional max pooling in the pooling
+region generation step. The only difference is that after pooling regions are
+generated, a mean operation is performed instead of a max operation in each
+pooling region.
+END
+}
+op {
+ graph_op_name: "FractionalAvgPoolGrad"
+ endpoint {
+ name: "FractionalAvgPoolGrad"
+ }
+ summary: "Computes gradient of the FractionalAvgPool function."
+ description: <<END
+Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
+FractionalAvgPoolGrad, we just need to evenly back-propagate each element of
+out_backprop to those indices that form the same pooling cell. Therefore, we
+just need to know the shape of original input tensor, instead of the whole
+tensor.
+END
+}
+op {
+ graph_op_name: "FractionalMaxPool"
+ endpoint {
+ name: "FractionalMaxPool"
+ }
+ summary: "Performs fractional max pooling on the input."
+ description: <<END
+Fractional max pooling is slightly different than regular max pooling. In
+regular max pooling, you downsize an input set by taking the maximum value of
+smaller N x N subsections of the set (often 2x2), and try to reduce the set by
+a factor of N, where N is an integer. Fractional max pooling, as you might
+expect from the word "fractional", means that the overall reduction ratio N
+does not have to be an integer.
+
+The sizes of the pooling regions are generated randomly but are fairly uniform.
+For example, let's look at the height dimension, and the constraints on the
+list of rows that will be pool boundaries.
+
+First we define the following:
+
+1. input_row_length : the number of rows from the input set
+2. output_row_length : which will be smaller than the input
+3. alpha = input_row_length / output_row_length : our reduction ratio
+4. K = floor(alpha)
+5. row_pooling_sequence : this is the result list of pool boundary rows
+
+Then, row_pooling_sequence should satisfy:
+
+1. a[0] = 0 : the first value of the sequence is 0
+2. a[end] = input_row_length : the last value of the sequence is the size
+3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
+4. length(row_pooling_sequence) = output_row_length+1
+
+For more details on fractional max pooling, see this paper:
+[Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
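+
+As a rough sketch, one way to generate a boundary sequence that satisfies the
+four constraints above (the op's own pseudorandom generation differs in
+detail):
+
+```python
+import random
+
+def pooling_sequence(input_row_length, output_row_length, seed=0):
+  alpha = input_row_length / output_row_length
+  k = int(alpha)                      # constraint 3: intervals are K or K+1
+  rng = random.Random(seed)
+  seq = [0]                           # constraint 1
+  for i in range(output_row_length):
+    remaining_rows = input_row_length - seq[-1]
+    remaining_intervals = output_row_length - i
+    # Choose K or K+1 while keeping the remaining intervals feasible.
+    lo = max(k, remaining_rows - (k + 1) * (remaining_intervals - 1))
+    hi = min(k + 1, remaining_rows - k * (remaining_intervals - 1))
+    seq.append(seq[-1] + rng.randint(lo, hi))
+  return seq                          # constraints 2 and 4 hold by construction
+
+print(pooling_sequence(10, 4))        # e.g. [0, 3, 5, 8, 10]
+```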
+END
+}
+op {
+ graph_op_name: "FractionalMaxPoolGrad"
+ endpoint {
+ name: "FractionalMaxPoolGrad"
+ }
+ summary: "Computes gradient of the FractionalMaxPool function."
+}
+op {
+ graph_op_name: "FusedBatchNorm"
+ endpoint {
+ name: "FusedBatchNorm"
+ }
+ summary: "Batch normalization."
+ description: <<END
+Note that the layout of the 4D Tensors is defined by either "NHWC" or "NCHW".
+The size of the 1D Tensors matches dimension C of the 4D Tensors.
+END
+}
+op {
+ graph_op_name: "FusedBatchNormGrad"
+ endpoint {
+ name: "FusedBatchNormGrad"
+ }
+ summary: "Gradient for batch normalization."
+ description: <<END
+Note that the layout of the 4D Tensors is defined by either "NHWC" or "NCHW".
+The size of the 1D Tensors matches dimension C of the 4D Tensors.
+END
+}
+op {
+ graph_op_name: "FusedBatchNormGradV2"
+ endpoint {
+ name: "FusedBatchNormGradV2"
+ }
+ summary: "Gradient for batch normalization."
+ description: <<END
+Note that the layout of the 4D Tensors is defined by either "NHWC" or "NCHW".
+The size of the 1D Tensors matches dimension C of the 4D Tensors.
+END
+}
+op {
+ graph_op_name: "FusedBatchNormV2"
+ endpoint {
+ name: "FusedBatchNormV2"
+ }
+ summary: "Batch normalization."
+ description: <<END
+Note that the layout of the 4D Tensors is defined by either "NHWC" or "NCHW".
+The size of the 1D Tensors matches dimension C of the 4D Tensors.
+END
+}
+op {
+ graph_op_name: "FusedPadConv2D"
+ endpoint {
+ name: "FusedPadConv2D"
+ }
+ summary: "Performs a padding as a preprocess during a convolution."
+ description: <<END
+Similar to FusedResizeAndPadConv2d, this op allows for an optimized
+implementation where the spatial padding transformation stage is fused with the
+im2col lookup, but in this case without the bilinear filtering required for
+resizing. Fusing the padding prevents the need to write out the intermediate
+results as whole tensors, reducing memory pressure, and we can get some latency
+gains by merging the transformation calculations.
+The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
+order is used instead.
+Internally this op uses a single per-graph scratch buffer, which means that it
+will block if multiple versions are being run in parallel. This is because this
+operator is primarily an optimization to minimize memory usage.
+END
+}
+op {
+ graph_op_name: "FusedResizeAndPadConv2D"
+ endpoint {
+ name: "FusedResizeAndPadConv2D"
+ }
+ summary: "Performs a resize and padding as a preprocess during a convolution."
+ description: <<END
+It's often possible to do spatial transformations more efficiently as part of
+the packing stage of a convolution, so this op allows for an optimized
+implementation where these stages are fused together. This prevents the need to
+write out the intermediate results as whole tensors, reducing memory pressure,
+and we can get some latency gains by merging the transformation calculations.
+The data_format attribute for Conv2D isn't supported by this op, and defaults to
+'NHWC' order.
+Internally this op uses a single per-graph scratch buffer, which means that it
+will block if multiple versions are being run in parallel. This is because this
+operator is primarily an optimization to minimize memory usage.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_G.pbtxt b/tensorflow/core/api_def/base_api/api_def_G.pbtxt
new file mode 100644
index 0000000000..343d505718
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_G.pbtxt
@@ -0,0 +1,257 @@
+op {
+ graph_op_name: "Gather"
+ endpoint {
+ name: "Gather"
+ }
+ summary: "Gather slices from `params` according to `indices`."
+ description: <<END
+`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
+Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
+
+```python
+ # Scalar indices
+ output[:, ..., :] = params[indices, :, ... :]
+
+ # Vector indices
+ output[i, :, ..., :] = params[indices[i], :, ... :]
+
+ # Higher rank indices
+ output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
+```
+
+If `indices` is a permutation and `len(indices) == params.shape[0]` then
+this operation will permute `params` accordingly.
+
+`validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
+`indices` are always validated to be within range. If assigned to GPU,
+out-of-bound indices result in safe but unspecified behavior, which may include
+raising an error.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
+</div>
+END
+}
+op {
+ graph_op_name: "GatherNd"
+ endpoint {
+ name: "GatherNd"
+ }
+ summary: "Gather slices from `params` into a Tensor with shape specified by `indices`."
+ description: <<END
+`indices` is a K-dimensional integer tensor, best thought of as a
+(K-1)-dimensional tensor of indices into `params`, where each element defines a
+slice of `params`:
+
+ output[i_0, ..., i_{K-2}] = params[indices[i_0, ..., i_{K-2}]]
+
+Whereas in @{tf.gather} `indices` defines slices into the first
+dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
+first `N` dimensions of `params`, where `N = indices.shape[-1]`.
+
+The last dimension of `indices` can be at most the rank of
+`params`:
+
+ indices.shape[-1] <= params.rank
+
+The last dimension of `indices` corresponds to elements
+(if `indices.shape[-1] == params.rank`) or slices
+(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
+of `params`. The output tensor has shape
+
+ indices.shape[:-1] + params.shape[indices.shape[-1]:]
+
+Some examples follow.
+
+Simple indexing into a matrix:
+
+```python
+ indices = [[0, 0], [1, 1]]
+ params = [['a', 'b'], ['c', 'd']]
+ output = ['a', 'd']
+```
+
+Slice indexing into a matrix:
+
+```python
+ indices = [[1], [0]]
+ params = [['a', 'b'], ['c', 'd']]
+ output = [['c', 'd'], ['a', 'b']]
+```
+
+Indexing into a 3-tensor:
+
+```python
+ indices = [[1]]
+ params = [[['a0', 'b0'], ['c0', 'd0']],
+ [['a1', 'b1'], ['c1', 'd1']]]
+ output = [[['a1', 'b1'], ['c1', 'd1']]]
+
+
+ indices = [[0, 1], [1, 0]]
+ params = [[['a0', 'b0'], ['c0', 'd0']],
+ [['a1', 'b1'], ['c1', 'd1']]]
+ output = [['c0', 'd0'], ['a1', 'b1']]
+
+
+ indices = [[0, 0, 1], [1, 0, 1]]
+ params = [[['a0', 'b0'], ['c0', 'd0']],
+ [['a1', 'b1'], ['c1', 'd1']]]
+ output = ['b0', 'b1']
+```
+
+Batched indexing into a matrix:
+
+```python
+ indices = [[[0, 0]], [[0, 1]]]
+ params = [['a', 'b'], ['c', 'd']]
+ output = [['a'], ['b']]
+```
+
+Batched slice indexing into a matrix:
+
+```python
+ indices = [[[1]], [[0]]]
+ params = [['a', 'b'], ['c', 'd']]
+ output = [[['c', 'd']], [['a', 'b']]]
+```
+
+Batched indexing into a 3-tensor:
+
+```python
+ indices = [[[1]], [[0]]]
+ params = [[['a0', 'b0'], ['c0', 'd0']],
+ [['a1', 'b1'], ['c1', 'd1']]]
+ output = [[[['a1', 'b1'], ['c1', 'd1']]],
+ [[['a0', 'b0'], ['c0', 'd0']]]]
+
+ indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
+ params = [[['a0', 'b0'], ['c0', 'd0']],
+ [['a1', 'b1'], ['c1', 'd1']]]
+ output = [[['c0', 'd0'], ['a1', 'b1']],
+ [['a0', 'b0'], ['c1', 'd1']]]
+
+
+ indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
+ params = [[['a0', 'b0'], ['c0', 'd0']],
+ [['a1', 'b1'], ['c1', 'd1']]]
+ output = [['b0', 'b1'], ['d0', 'c1']]
+```
+END
+}
+op {
+ graph_op_name: "GatherV2"
+ endpoint {
+ name: "GatherV2"
+ }
+ summary: "Gather slices from `params` axis `axis` according to `indices`."
+ description: <<END
+`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
+Produces an output tensor with shape `params.shape[:axis] + indices.shape +
+params.shape[axis + 1:]` where:
+
+```python
+ # Scalar indices (output is rank(params) - 1).
+ output[a_0, ..., a_n, b_0, ..., b_n] =
+ params[a_0, ..., a_n, indices, b_0, ..., b_n]
+
+ # Vector indices (output is rank(params)).
+ output[a_0, ..., a_n, i, b_0, ..., b_n] =
+ params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
+
+ # Higher rank indices (output is rank(params) + rank(indices) - 1).
+ output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
+ params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
+```
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
+</div>
+END
+}
+op {
+ graph_op_name: "GenerateVocabRemapping"
+ endpoint {
+ name: "GenerateVocabRemapping"
+ }
+ summary: "Given a path to new and old vocabulary files, returns a remapping Tensor of"
+ description: <<END
+length `num_new_vocab`, where `remapping[i]` contains the row number in the old
+vocabulary that corresponds to row `i` in the new vocabulary (starting at line
+`new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
+in the new vocabulary is not in the old vocabulary. `new_vocab_offset` enables
+use in the partitioned variable case, and should generally be set by examining
+partitioning info. Each vocabulary file should be a text file, with each line
+containing a single entity within the vocabulary.
+
+For example, with `new_vocab_file` a text file containing each of the following
+elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3],
+`num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be
+`[0, -1, 2]`.
+
+The op also returns a count of how many entries in the new vocabulary
+were present in the old vocabulary, which is used to calculate the number of
+values to initialize in a weight matrix remapping.
+
+This functionality can be used to remap both row vocabularies (typically,
+features) and column vocabularies (typically, classes) from TensorFlow
+checkpoints. Note that the partitioning logic relies on contiguous vocabularies
+corresponding to div-partitioned variables. Moreover, the underlying remapping
+uses an IndexTable (as opposed to an inexact CuckooTable), so client code should
+use the corresponding index_table_from_file() as the FeatureColumn framework
+does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
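+
+A plain-Python sketch of the remapping logic described above (illustrative; the
+real op reads the vocabularies from files and handles partitioned-variable
+offsets):
+
+```python
+def vocab_remapping(new_vocab, old_vocab, new_vocab_offset, num_new_vocab):
+  old_index = {entity: row for row, entity in enumerate(old_vocab)}
+  window = new_vocab[new_vocab_offset:new_vocab_offset + num_new_vocab]
+  remapping = [old_index.get(entity, -1) for entity in window]
+  num_present = sum(1 for r in remapping if r != -1)
+  return remapping, num_present
+
+# Matches the worked example above.
+print(vocab_remapping(['f0', 'f1', 'f2', 'f3'], ['f1', 'f0', 'f3'],
+                      new_vocab_offset=1, num_new_vocab=3))
+# => ([0, -1, 2], 2)
+```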
+END
+}
+op {
+ graph_op_name: "GetSessionHandle"
+ endpoint {
+ name: "GetSessionHandle"
+ }
+ summary: "Store the input tensor in the state of the current session."
+}
+op {
+ graph_op_name: "GetSessionHandleV2"
+ endpoint {
+ name: "GetSessionHandleV2"
+ }
+ summary: "Store the input tensor in the state of the current session."
+}
+op {
+ graph_op_name: "GetSessionTensor"
+ endpoint {
+ name: "GetSessionTensor"
+ }
+ summary: "Get the value of the tensor specified by its handle."
+}
+op {
+ graph_op_name: "Greater"
+ endpoint {
+ name: "Greater"
+ }
+ summary: "Returns the truth value of (x > y) element-wise."
+ description: <<END
+*NOTE*: `Greater` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "GreaterEqual"
+ endpoint {
+ name: "GreaterEqual"
+ }
+ summary: "Returns the truth value of (x >= y) element-wise."
+ description: <<END
+*NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "GroupByWindowDataset"
+ endpoint {
+ name: "GroupByWindowDataset"
+ }
+ summary: "Creates a dataset that computes a windowed group-by on `input_dataset`."
+ description: <<END
+// TODO(mrry): Support non-int64 keys.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_H.pbtxt b/tensorflow/core/api_def/base_api/api_def_H.pbtxt
new file mode 100644
index 0000000000..71282e7def
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_H.pbtxt
@@ -0,0 +1,52 @@
+op {
+ graph_op_name: "HSVToRGB"
+ endpoint {
+ name: "HSVToRGB"
+ }
+ summary: "Convert one or more images from HSV to RGB."
+ description: <<END
+Outputs a tensor of the same shape as the `images` tensor, containing the RGB
+value of the pixels. The output is only well defined if the values in `images`
+are in `[0,1]`.
+
+See `rgb_to_hsv` for a description of the HSV encoding.
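+
+For a single pixel, Python's standard library performs the same conversion (a
+sketch for intuition, not this op's implementation):
+
+```python
+import colorsys
+
+# Pure red: hue 0.0 with full saturation and value.
+h, s, v = 0.0, 1.0, 1.0
+print(colorsys.hsv_to_rgb(h, s, v))  # => (1.0, 0.0, 0.0)
+```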
+END
+}
+op {
+ graph_op_name: "HashTable"
+ endpoint {
+ name: "HashTable"
+ }
+ summary: "Creates a non-initialized hash table."
+ description: <<END
+This op creates a hash table, specifying the type of its keys and values.
+Before using the table you will have to initialize it. After initialization the
+table will be immutable.
+END
+}
+op {
+ graph_op_name: "HashTableV2"
+ endpoint {
+ name: "HashTableV2"
+ }
+ summary: "Creates a non-initialized hash table."
+ description: <<END
+This op creates a hash table, specifying the type of its keys and values.
+Before using the table you will have to initialize it. After initialization the
+table will be immutable.
+END
+}
+op {
+ graph_op_name: "HistogramSummary"
+ endpoint {
+ name: "HistogramSummary"
+ }
+ summary: "Outputs a `Summary` protocol buffer with a histogram."
+ description: <<END
+The generated
+[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+has one summary value containing a histogram for `values`.
+
+This op reports an `InvalidArgument` error if any value is not finite.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_I.pbtxt b/tensorflow/core/api_def/base_api/api_def_I.pbtxt
new file mode 100644
index 0000000000..caaf93bf88
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_I.pbtxt
@@ -0,0 +1,518 @@
+op {
+ graph_op_name: "IFFT"
+ endpoint {
+ name: "IFFT"
+ }
+ summary: "Inverse fast Fourier transform."
+ description: <<END
+Computes the inverse 1-dimensional discrete Fourier transform over the
+inner-most dimension of `input`.
+END
+}
+op {
+ graph_op_name: "IFFT2D"
+ endpoint {
+ name: "IFFT2D"
+ }
+ summary: "Inverse 2D fast Fourier transform."
+ description: <<END
+Computes the inverse 2-dimensional discrete Fourier transform over the
+inner-most 2 dimensions of `input`.
+END
+}
+op {
+ graph_op_name: "IFFT3D"
+ endpoint {
+ name: "IFFT3D"
+ }
+ summary: "Inverse 3D fast Fourier transform."
+ description: <<END
+Computes the inverse 3-dimensional discrete Fourier transform over the
+inner-most 3 dimensions of `input`.
+END
+}
+op {
+ graph_op_name: "IRFFT"
+ endpoint {
+ name: "IRFFT"
+ }
+ summary: "Inverse real-valued fast Fourier transform."
+ description: <<END
+Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
+signal over the inner-most dimension of `input`.
+
+The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
+`fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
+`fft_length` is not provided, it is computed from the size of the inner-most
+dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
+compute `input` is odd, it should be provided since it cannot be inferred
+properly.
+
+Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
+than the corresponding dimension of `input`, the dimension is cropped. If it is
+larger, the dimension is padded with zeros.
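+
+For example (an illustrative sketch of the default `fft_length` inference): if
+the inner-most dimension of `input` has size 5, then `fft_length` is inferred
+as `2 * (5 - 1) = 8`, and the inner-most dimension of the output has size 8.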
+END
+}
+op {
+ graph_op_name: "IRFFT2D"
+ endpoint {
+ name: "IRFFT2D"
+ }
+ summary: "Inverse 2D real-valued fast Fourier transform."
+ description: <<END
+Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
+signal over the inner-most 2 dimensions of `input`.
+
+The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
+The inner-most dimension contains the `fft_length / 2 + 1` unique components of
+the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
+from the size of the inner-most 2 dimensions of `input`. If the FFT length used
+to compute `input` is odd, it should be provided since it cannot be inferred
+properly.
+
+Along each axis `IRFFT2D` is computed on, if `fft_length` (or
+`fft_length / 2 + 1` for the inner-most dimension) is smaller than the
+corresponding dimension of `input`, the dimension is cropped. If it is larger,
+the dimension is padded with zeros.
+END
+}
+op {
+ graph_op_name: "IRFFT3D"
+ endpoint {
+ name: "IRFFT3D"
+ }
+ summary: "Inverse 3D real-valued fast Fourier transform."
+ description: <<END
+Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
+signal over the inner-most 3 dimensions of `input`.
+
+The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
+The inner-most dimension contains the `fft_length / 2 + 1` unique components of
+the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
+from the size of the inner-most 3 dimensions of `input`. If the FFT length used
+to compute `input` is odd, it should be provided since it cannot be inferred
+properly.
+
+Along each axis `IRFFT3D` is computed on, if `fft_length` (or
+`fft_length / 2 + 1` for the inner-most dimension) is smaller than the
+corresponding dimension of `input`, the dimension is cropped. If it is larger,
+the dimension is padded with zeros.
+END
+}
+op {
+ graph_op_name: "Identity"
+ endpoint {
+ name: "Identity"
+ }
+ summary: "Return a tensor with the same shape and contents as the input tensor or value."
+}
+op {
+ graph_op_name: "IdentityN"
+ endpoint {
+ name: "IdentityN"
+ }
+ summary: "Returns a list of tensors with the same shapes and contents as the input"
+ description: <<END
+tensors.
+
+This op can be used to override the gradient for complicated functions. For
+example, suppose y = f(x) and we wish to apply a custom function g for backprop
+such that dx = g(dy). In Python,
+
+```python
+with tf.get_default_graph().gradient_override_map(
+ {'IdentityN': 'OverrideGradientWithG'}):
+ y, _ = identity_n([f(x), x])
+
+@tf.RegisterGradient('OverrideGradientWithG')
+def ApplyG(op, dy, _):
+ return [None, g(dy)] # Do not backprop to f(x).
+```
+END
+}
+op {
+ graph_op_name: "IdentityReader"
+ endpoint {
+ name: "IdentityReader"
+ }
+ summary: "A Reader that outputs the queued work as both the key and value."
+ description: <<END
+To use, enqueue strings in a Queue. ReaderRead will take the front
+work string and output (work, work).
+END
+}
+op {
+ graph_op_name: "IdentityReaderV2"
+ endpoint {
+ name: "IdentityReaderV2"
+ }
+ summary: "A Reader that outputs the queued work as both the key and value."
+ description: <<END
+To use, enqueue strings in a Queue. ReaderRead will take the front
+work string and output (work, work).
+END
+}
+op {
+ graph_op_name: "Igamma"
+ endpoint {
+ name: "Igamma"
+ }
+ summary: "Compute the lower regularized incomplete Gamma function `Q(a, x)`."
+ description: <<END
+The lower regularized incomplete Gamma function is defined as:
+
+\\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
+
+where
+
+\\(gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt\\)
+
+is the lower incomplete Gamma function.
+
+Note, above `Q(a, x)` (`Igammac`) is the upper regularized incomplete
+Gamma function.
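+
+For example (illustrative values, following the formula above): for `a = 1`,
+\\(P(1, x) = 1 - e^{-x}\\), so `Igamma(1.0, 1.0)` is approximately `0.6321`.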
+END
+}
+op {
+ graph_op_name: "Igammac"
+ endpoint {
+ name: "Igammac"
+ }
+ summary: "Compute the upper regularized incomplete Gamma function `Q(a, x)`."
+ description: <<END
+The upper regularized incomplete Gamma function is defined as:
+
+\\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
+
+where
+
+\\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\\)
+
+is the upper incomplete Gamma function.
+
+Note, above `P(a, x)` (`Igamma`) is the lower regularized incomplete
+Gamma function.
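+
+For example (illustrative values, following the formula above): for `a = 1`,
+\\(Q(1, x) = e^{-x}\\), so `Igammac(1.0, 1.0)` is approximately `0.3679`.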
+END
+}
+op {
+ graph_op_name: "IgnoreErrorsDataset"
+ endpoint {
+ name: "IgnoreErrorsDataset"
+ }
+ summary: "Creates a dataset that contains the elements of `input_dataset` ignoring errors."
+}
+op {
+ graph_op_name: "Imag"
+ endpoint {
+ name: "Imag"
+ }
+ summary: "Returns the imaginary part of a complex number."
+ description: <<END
+Given a tensor `input` of complex numbers, this operation returns a tensor of
+type `float` that is the imaginary part of each element in `input`. All
+elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
+is the real part and *b* is the imaginary part returned by this operation.
+
+For example:
+
+```
+# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+tf.imag(input) ==> [4.75, 5.75]
+```
+END
+}
+op {
+ graph_op_name: "ImageSummary"
+ endpoint {
+ name: "ImageSummary"
+ }
+ summary: "Outputs a `Summary` protocol buffer with images."
+ description: <<END
+The summary has up to `max_images` summary values containing images. The
+images are built from `tensor` which must be 4-D with shape `[batch_size,
+height, width, channels]` and where `channels` can be:
+
+* 1: `tensor` is interpreted as Grayscale.
+* 3: `tensor` is interpreted as RGB.
+* 4: `tensor` is interpreted as RGBA.
+
+The images have the same number of channels as the input tensor. For float
+input, the values are normalized one image at a time to fit in the range
+`[0, 255]`. `uint8` values are unchanged. The op uses two different
+normalization algorithms:
+
+* If the input values are all positive, they are rescaled so the largest one
+ is 255.
+
+* If any input value is negative, the values are shifted so input value 0.0
+ is at 127. They are then rescaled so that either the smallest value is 0,
+ or the largest one is 255.
+
+The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+build the `tag` of the summary values:
+
+* If `max_images` is 1, the summary value tag is '*tag*/image'.
+* If `max_images` is greater than 1, the summary value tags are
+ generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
+
+The `bad_color` argument is the color to use in the generated images for
+non-finite input values. It is a `uint8` 1-D tensor of length `channels`.
+Each element must be in the range `[0, 255]` (it represents the value of a
+pixel in the output image). Non-finite values in the input tensor are
+replaced by this tensor in the output image. The default value is the color
+red.
+END
+}
+op {
+ graph_op_name: "ImmutableConst"
+ endpoint {
+ name: "ImmutableConst"
+ }
+ summary: "Returns immutable tensor from memory region."
+ description: <<END
+The current implementation memmaps the tensor from a file.
+END
+}
+op {
+ graph_op_name: "InTopK"
+ endpoint {
+ name: "InTopK"
+ }
+ summary: "Says whether the targets are in the top `K` predictions."
+ description: <<END
+This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the
+prediction for the target class is among the top `k` predictions among
+all predictions for example `i`. Note that the behavior of `InTopK` differs
+from the `TopK` op in its handling of ties; if multiple classes have the
+same prediction value and straddle the top-`k` boundary, all of those
+classes are considered to be in the top `k`.
+
+More formally, let
+
+ \\(predictions_i\\) be the predictions for all classes for example `i`,
+ \\(targets_i\\) be the target class for example `i`,
+ \\(out_i\\) be the output for example `i`,
+
+$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
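+
+For example (illustrative values):
+
+```
+# 'predictions' is [[0.1, 0.8, 0.1], [0.6, 0.3, 0.1]]
+# 'targets' is [1, 2], k is 1
+in_top_k(predictions, targets, 1) ==> [true, false]
+```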
+END
+}
+op {
+ graph_op_name: "InTopKV2"
+ endpoint {
+ name: "InTopKV2"
+ }
+ summary: "Says whether the targets are in the top `K` predictions."
+ description: <<END
+This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the
+prediction for the target class is among the top `k` predictions among
+all predictions for example `i`. Note that the behavior of `InTopK` differs
+from the `TopK` op in its handling of ties; if multiple classes have the
+same prediction value and straddle the top-`k` boundary, all of those
+classes are considered to be in the top `k`.
+
+More formally, let
+
+ \\(predictions_i\\) be the predictions for all classes for example `i`,
+ \\(targets_i\\) be the target class for example `i`,
+ \\(out_i\\) be the output for example `i`,
+
+$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
+END
+}
+op {
+ graph_op_name: "InitializeTable"
+ endpoint {
+ name: "InitializeTable"
+ }
+ summary: "Table initializer that takes two tensors for keys and values respectively."
+}
+op {
+ graph_op_name: "InitializeTableFromTextFile"
+ endpoint {
+ name: "InitializeTableFromTextFile"
+ }
+ summary: "Initializes a table from a text file."
+ description: <<END
+It inserts one key-value pair into the table for each line of the file.
+The key and value are extracted from the whole line content, from elements of
+the line split on `delimiter`, or from the line number (starting from zero).
+Where to extract the key and value from a line is specified by `key_index` and
+`value_index`.
+
+- A value of -1 means use the line number (starting from zero); expects `int64`.
+- A value of -2 means use the whole line content; expects `string`.
+- A value >= 0 means use the index (starting at zero) of the split line based
+  on `delimiter`.
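+
+For example (an illustrative sketch, assuming a hypothetical two-line
+vocabulary file): with `key_index = -2` (whole line) and `value_index = -1`
+(line number), a file containing the lines `apple` and `banana` initializes a
+table mapping `apple -> 0` and `banana -> 1`.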
+END
+}
+op {
+ graph_op_name: "InitializeTableFromTextFileV2"
+ endpoint {
+ name: "InitializeTableFromTextFileV2"
+ }
+ summary: "Initializes a table from a text file."
+ description: <<END
+It inserts one key-value pair into the table for each line of the file.
+The key and value are extracted from the whole line content, from elements of
+the line split on `delimiter`, or from the line number (starting from zero).
+Where to extract the key and value from a line is specified by `key_index` and
+`value_index`.
+
+- A value of -1 means use the line number (starting from zero); expects `int64`.
+- A value of -2 means use the whole line content; expects `string`.
+- A value >= 0 means use the index (starting at zero) of the split line based
+  on `delimiter`.
+END
+}
+op {
+ graph_op_name: "InitializeTableV2"
+ endpoint {
+ name: "InitializeTableV2"
+ }
+ summary: "Table initializer that takes two tensors for keys and values respectively."
+}
+op {
+ graph_op_name: "InterleaveDataset"
+ endpoint {
+ name: "InterleaveDataset"
+ }
+ summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
+ description: <<END
+Unlike MapDataset, the `f` in InterleaveDataset is expected to return
+a Dataset variant, and InterleaveDataset will flatten successive
+results into a single Dataset. Unlike FlatMapDataset,
+InterleaveDataset will interleave sequences of up to `block_length`
+consecutive elements from `cycle_length` input elements.
+END
+}
+op {
+ graph_op_name: "Inv"
+ endpoint {
+ name: "Inv"
+ }
+ summary: "Computes the reciprocal of x element-wise."
+ description: <<END
+I.e., \\(y = 1 / x\\).
+END
+}
+op {
+ graph_op_name: "InvGrad"
+ endpoint {
+ name: "InvGrad"
+ }
+ summary: "Computes the gradient for the inverse of `x` wrt its input."
+ description: <<END
+Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
+is the corresponding input gradient.
+END
+}
+op {
+ graph_op_name: "Invert"
+ endpoint {
+ name: "Invert"
+ }
+ summary: "Flips all bits elementwise."
+ description: <<END
+The result will have exactly those bits set that are not set in `x`. The
+computation is performed on the underlying representation of `x`.
+END
+}
+op {
+ graph_op_name: "InvertPermutation"
+ endpoint {
+ name: "InvertPermutation"
+ }
+ summary: "Computes the inverse permutation of a tensor."
+ description: <<END
+This operation computes the inverse of an index permutation. It takes a 1-D
+integer tensor `x`, which represents the indices of a zero-based array, and
+swaps each value with its index position. In other words, for an output tensor
+`y` and an input tensor `x`, this operation computes the following:
+
+`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
+
+The values must include 0. There can be no duplicate values or negative values.
+
+For example:
+
+```
+# tensor `x` is [3, 4, 0, 2, 1]
+invert_permutation(x) ==> [2, 4, 3, 0, 1]
+```
+END
+}
+op {
+ graph_op_name: "IsFinite"
+ endpoint {
+ name: "IsFinite"
+ }
+ summary: "Returns which elements of x are finite."
+ description: <<END
+@compatibility(numpy)
+Equivalent to np.isfinite
+@end_compatibility
+END
+}
+op {
+ graph_op_name: "IsInf"
+ endpoint {
+ name: "IsInf"
+ }
+ summary: "Returns which elements of x are Inf."
+ description: <<END
+@compatibility(numpy)
+Equivalent to np.isinf
+@end_compatibility
+END
+}
+op {
+ graph_op_name: "IsNan"
+ endpoint {
+ name: "IsNan"
+ }
+ summary: "Returns which elements of x are NaN."
+ description: <<END
+@compatibility(numpy)
+Equivalent to np.isnan
+@end_compatibility
+END
+}
+op {
+ graph_op_name: "IsVariableInitialized"
+ endpoint {
+ name: "IsVariableInitialized"
+ }
+ summary: "Checks whether a tensor has been initialized."
+ description: <<END
+Outputs a boolean scalar indicating whether the tensor has been initialized.
+END
+}
+op {
+ graph_op_name: "Iterator"
+ endpoint {
+ name: "Iterator"
+ }
+ summary: "A container for an iterator resource."
+}
+op {
+ graph_op_name: "IteratorFromStringHandle"
+ endpoint {
+ name: "IteratorFromStringHandle"
+ }
+ summary: "Converts the given string representing a handle to an iterator to a resource."
+}
+op {
+ graph_op_name: "IteratorGetNext"
+ endpoint {
+ name: "IteratorGetNext"
+ }
+ summary: "Gets the next output from the given iterator."
+}
+op {
+ graph_op_name: "IteratorToStringHandle"
+ endpoint {
+ name: "IteratorToStringHandle"
+ }
+ summary: "Converts the given `resource_handle` representing an iterator to a string."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_L.pbtxt b/tensorflow/core/api_def/base_api/api_def_L.pbtxt
new file mode 100644
index 0000000000..09e55eacc7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_L.pbtxt
@@ -0,0 +1,392 @@
+op {
+ graph_op_name: "L2Loss"
+ endpoint {
+ name: "L2Loss"
+ }
+ summary: "L2 Loss."
+ description: <<END
+Computes half the L2 norm of a tensor without the `sqrt`:
+
+ output = sum(t ** 2) / 2
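+
+For example (illustrative values): for `t = [1., 2., 3.]`, the output is
+`(1 + 4 + 9) / 2 = 7`.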
+END
+}
+op {
+ graph_op_name: "LMDBReader"
+ endpoint {
+ name: "LMDBReader"
+ }
+ summary: "A Reader that outputs the records from a LMDB file."
+}
+op {
+ graph_op_name: "LRN"
+ endpoint {
+ name: "LRN"
+ }
+ summary: "Local Response Normalization."
+ description: <<END
+The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
+dimension), and each vector is normalized independently. Within a given vector,
+each component is divided by the weighted, squared sum of inputs within
+`depth_radius`. In detail,
+
+ sqr_sum[a, b, c, d] =
+ sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
+ output = input / (bias + alpha * sqr_sum) ** beta
+
+For details, see [Krizhevsky et al., ImageNet classification with deep
+convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
+END
+}
+op {
+ graph_op_name: "LRNGrad"
+ endpoint {
+ name: "LRNGrad"
+ }
+ summary: "Gradients for Local Response Normalization."
+}
+op {
+ graph_op_name: "LearnedUnigramCandidateSampler"
+ endpoint {
+ name: "LearnedUnigramCandidateSampler"
+ }
+ summary: "Generates labels for candidate sampling with a learned unigram distribution."
+ description: <<END
+See explanations of candidate sampling and the data formats at
+go/candidate-sampling.
+
+For each batch, this op picks a single set of sampled candidate labels.
+
+The advantages of sampling candidates per-batch are simplicity and the
+possibility of efficient dense matrix multiplication. The disadvantage is that
+the sampled candidates must be chosen independently of the context and of the
+true labels.
+END
+}
+op {
+ graph_op_name: "Less"
+ endpoint {
+ name: "Less"
+ }
+ summary: "Returns the truth value of (x < y) element-wise."
+ description: <<END
+*NOTE*: `Less` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "LessEqual"
+ endpoint {
+ name: "LessEqual"
+ }
+ summary: "Returns the truth value of (x <= y) element-wise."
+ description: <<END
+*NOTE*: `LessEqual` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "Lgamma"
+ endpoint {
+ name: "Lgamma"
+ }
+ summary: "Computes the log of the absolute value of `Gamma(x)` element-wise."
+}
+op {
+ graph_op_name: "LinSpace"
+ endpoint {
+ name: "LinSpace"
+ }
+ summary: "Generates values in an interval."
+ description: <<END
+A sequence of `num` evenly-spaced values are generated beginning at `start`.
+If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`,
+so that the last one is exactly `stop`.
+
+For example:
+
+```
+tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0]
+```
+END
+}
+op {
+ graph_op_name: "ListDiff"
+ endpoint {
+ name: "ListDiff"
+ }
+ summary: "Computes the difference between two lists of numbers or strings."
+ description: <<END
+Given a list `x` and a list `y`, this operation returns a list `out` that
+represents all values that are in `x` but not in `y`. The returned list `out`
+is sorted in the same order that the numbers appear in `x` (duplicates are
+preserved). This operation also returns a list `idx` that represents the
+position of each `out` element in `x`. In other words:
+
+`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
+
+For example, given this input:
+
+```
+x = [1, 2, 3, 4, 5, 6]
+y = [1, 3, 5]
+```
+
+This operation would return:
+
+```
+out ==> [2, 4, 6]
+idx ==> [1, 3, 5]
+```
+END
+}
+op {
+ graph_op_name: "LoadAndRemapMatrix"
+ endpoint {
+ name: "LoadAndRemapMatrix"
+ }
+ summary: "Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint"
+ description: <<END
+at `ckpt_path` and potentially reorders its rows and columns using the
+specified remappings.
+
+Most users should use one of the wrapper initializers (such as
+`tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this
+function directly.
+
+The remappings are 1-D tensors with the following properties:
+
+* `row_remapping` must have exactly `num_rows` entries. Row `i` of the output
+ matrix will be initialized from the row corresponding to index
+ `row_remapping[i]` in the old `Tensor` from the checkpoint.
+* `col_remapping` must have either 0 entries (indicating that no column
+ reordering is needed) or `num_cols` entries. If specified, column `j` of the
+ output matrix will be initialized from the column corresponding to index
+ `col_remapping[j]` in the old `Tensor` from the checkpoint.
+* A value of -1 in either of the remappings signifies a "missing" entry. In that
+ case, values from the `initializing_values` tensor will be used to fill that
+ missing row or column. If `row_remapping` has `r` missing entries and
+ `col_remapping` has `c` missing entries, then the following condition must be
+ true:
+
+`(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`
+
+The remapping tensors can be generated using the GenerateVocabRemapping op.
+
+As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],
+initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing
+the value from row i, column j of the old tensor in the checkpoint, the output
+matrix will look like the following:
+
+[[w(1, 0), w(1, 2), 0.5],
+ [w(0, 0), w(0, 2), -0.5],
+ [0.25, -0.25, 42]]
+END
+}
+op {
+ graph_op_name: "Log"
+ endpoint {
+ name: "Log"
+ }
+ summary: "Computes natural logarithm of x element-wise."
+ description: <<END
+I.e., \\(y = \log_e x\\).
+END
+}
+op {
+ graph_op_name: "Log1p"
+ endpoint {
+ name: "Log1p"
+ }
+ summary: "Computes natural logarithm of (1 + x) element-wise."
+ description: <<END
+I.e., \\(y = \log_e (1 + x)\\).
+END
+}
+op {
+ graph_op_name: "LogMatrixDeterminant"
+ endpoint {
+ name: "LogMatrixDeterminant"
+ }
+ summary: "Computes the sign and the log of the absolute value of the determinant of"
+ description: <<END
+one or more square matrices.
+
+The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions
+form square matrices. The outputs are two tensors containing the signs and
+absolute values of the log determinants for all N input submatrices
+`[..., :, :]` such that the determinant = sign*exp(log_abs_determinant).
+The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU
+is the LU decomposition of the input and P is the corresponding
+permutation matrix.
+END
+}
+op {
+ graph_op_name: "LogSoftmax"
+ endpoint {
+ name: "LogSoftmax"
+ }
+ summary: "Computes log softmax activations."
+ description: <<END
+For each batch `i` and class `j` we have
+
+ logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
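+
+For example (illustrative values, rounded):
+
+```
+# 'logits' is [[1.0, 2.0, 3.0]]
+log_softmax(logits) ==> [[-2.4076, -1.4076, -0.4076]]
+```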
+END
+}
+op {
+ graph_op_name: "LogUniformCandidateSampler"
+ endpoint {
+ name: "LogUniformCandidateSampler"
+ }
+ summary: "Generates labels for candidate sampling with a log-uniform distribution."
+ description: <<END
+See explanations of candidate sampling and the data formats at
+go/candidate-sampling.
+
+For each batch, this op picks a single set of sampled candidate labels.
+
+The advantages of sampling candidates per-batch are simplicity and the
+possibility of efficient dense matrix multiplication. The disadvantage is that
+the sampled candidates must be chosen independently of the context and of the
+true labels.
+END
+}
+op {
+ graph_op_name: "LogicalAnd"
+ endpoint {
+ name: "LogicalAnd"
+ }
+ summary: "Returns the truth value of x AND y element-wise."
+ description: <<END
+*NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "LogicalNot"
+ endpoint {
+ name: "LogicalNot"
+ }
+ summary: "Returns the truth value of NOT x element-wise."
+}
+op {
+ graph_op_name: "LogicalOr"
+ endpoint {
+ name: "LogicalOr"
+ }
+ summary: "Returns the truth value of x OR y element-wise."
+ description: <<END
+*NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "LookupTableExport"
+ endpoint {
+ name: "LookupTableExport"
+ }
+ summary: "Outputs all keys and values in the table."
+}
+op {
+ graph_op_name: "LookupTableExportV2"
+ endpoint {
+ name: "LookupTableExportV2"
+ }
+ summary: "Outputs all keys and values in the table."
+}
+op {
+ graph_op_name: "LookupTableFind"
+ endpoint {
+ name: "LookupTableFind"
+ }
+ summary: "Looks up keys in a table, outputs the corresponding values."
+ description: <<END
+The tensor `keys` must be of the same type as the keys of the table.
+The output `values` is of the type of the table values.
+
+The scalar `default_value` is the value output for keys not present in the
+table. It must also be of the same type as the table values.
+END
+}
+op {
+ graph_op_name: "LookupTableFindV2"
+ endpoint {
+ name: "LookupTableFindV2"
+ }
+ summary: "Looks up keys in a table, outputs the corresponding values."
+ description: <<END
+The tensor `keys` must be of the same type as the keys of the table.
+The output `values` is of the type of the table values.
+
+The scalar `default_value` is the value output for keys not present in the
+table. It must also be of the same type as the table values.
+END
+}
+op {
+ graph_op_name: "LookupTableImport"
+ endpoint {
+ name: "LookupTableImport"
+ }
+ summary: "Replaces the contents of the table with the specified keys and values."
+ description: <<END
+The tensor `keys` must be of the same type as the keys of the table.
+The tensor `values` must be of the type of the table values.
+END
+}
+op {
+ graph_op_name: "LookupTableImportV2"
+ endpoint {
+ name: "LookupTableImportV2"
+ }
+ summary: "Replaces the contents of the table with the specified keys and values."
+ description: <<END
+The tensor `keys` must be of the same type as the keys of the table.
+The tensor `values` must be of the type of the table values.
+END
+}
+op {
+ graph_op_name: "LookupTableInsert"
+ endpoint {
+ name: "LookupTableInsert"
+ }
+ summary: "Updates the table to associates keys with values."
+ description: <<END
+The tensor `keys` must be of the same type as the keys of the table.
+The tensor `values` must be of the type of the table values.
+END
+}
+op {
+ graph_op_name: "LookupTableInsertV2"
+ endpoint {
+ name: "LookupTableInsertV2"
+ }
+ summary: "Updates the table to associates keys with values."
+ description: <<END
+The tensor `keys` must be of the same type as the keys of the table.
+The tensor `values` must be of the type of the table values.
+END
+}
+op {
+ graph_op_name: "LookupTableSize"
+ endpoint {
+ name: "LookupTableSize"
+ }
+ summary: "Computes the number of elements in the given table."
+}
+op {
+ graph_op_name: "LookupTableSizeV2"
+ endpoint {
+ name: "LookupTableSizeV2"
+ }
+ summary: "Computes the number of elements in the given table."
+}
+op {
+ graph_op_name: "LoopCond"
+ endpoint {
+ name: "LoopCond"
+ }
+ summary: "Forwards the input to the output."
+ description: <<END
+This operator represents the loop termination condition used by the
+"pivot" switches of a loop.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_M.pbtxt b/tensorflow/core/api_def/base_api/api_def_M.pbtxt
new file mode 100644
index 0000000000..7295928bad
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_M.pbtxt
@@ -0,0 +1,749 @@
+op {
+ graph_op_name: "MakeIterator"
+ endpoint {
+ name: "MakeIterator"
+ }
+ summary: "Makes a new iterator from the given `dataset` and stores it in `iterator`."
+ description: <<END
+This operation may be executed multiple times. Each execution will reset the
+iterator in `iterator` to the first element of `dataset`.
+END
+}
+op {
+ graph_op_name: "MapClear"
+ endpoint {
+ name: "MapClear"
+ }
+ summary: "Op removes all elements in the underlying container."
+}
+op {
+ graph_op_name: "MapDataset"
+ endpoint {
+ name: "MapDataset"
+ }
+ summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
+}
+op {
+ graph_op_name: "MapIncompleteSize"
+ endpoint {
+ name: "MapIncompleteSize"
+ }
+ summary: "Op returns the number of incomplete elements in the underlying container."
+}
+op {
+ graph_op_name: "MapPeek"
+ endpoint {
+ name: "MapPeek"
+ }
+ summary: "Op peeks at the values at the specified key. If the"
+ description: <<END
+underlying container does not contain this key, this op
+will block until it does.
+END
+}
+op {
+ graph_op_name: "MapSize"
+ endpoint {
+ name: "MapSize"
+ }
+ summary: "Op returns the number of elements in the underlying container."
+}
+op {
+ graph_op_name: "MapStage"
+ endpoint {
+ name: "MapStage"
+ }
+ summary: "Stage (key, values) in the underlying container which behaves like a hashtable."
+}
+op {
+ graph_op_name: "MapUnstage"
+ endpoint {
+ name: "MapUnstage"
+ }
+ summary: "Op removes and returns the values associated with the key"
+ description: <<END
+from the underlying container. If the underlying container
+does not contain this key, the op will block until it does.
+END
+}
+op {
+ graph_op_name: "MapUnstageNoKey"
+ endpoint {
+ name: "MapUnstageNoKey"
+ }
+ summary: "Op removes and returns a random (key, value)"
+ description: <<END
+from the underlying container. If the underlying container
+does not contain elements, the op will block until it does.
+END
+}
+op {
+ graph_op_name: "MatMul"
+ endpoint {
+ name: "MatMul"
+ }
+ summary: "Multiply the matrix \"a\" by the matrix \"b\"."
+ description: <<END
+The inputs must be two-dimensional matrices and the inner dimension of
+"a" (after being transposed if transpose_a is true) must match the
+outer dimension of "b" (after being transposed if transposed_b is
+true).
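+
+For example (illustrative values):
+
+```
+# 'a' is [[1, 2], [3, 4]], 'b' is [[5, 6], [7, 8]]
+matmul(a, b) ==> [[19, 22], [43, 50]]
+```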
+
+*Note*: The default kernel implementation for MatMul on GPUs uses
+cublas.
+END
+}
+op {
+ graph_op_name: "MatchingFiles"
+ endpoint {
+ name: "MatchingFiles"
+ }
+ summary: "Returns the set of files matching one or more glob patterns."
+ description: <<END
+Note that this routine only supports wildcard characters in the
+basename portion of the pattern, not in the directory portion.
+END
+}
+op {
+ graph_op_name: "MatrixBandPart"
+ endpoint {
+ name: "MatrixBandPart"
+ }
+ summary: "Copy a tensor setting everything outside a central band in each innermost matrix"
+ description: <<END
+to zero.
+
+The `band` part is computed as follows:
+Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
+tensor with the same shape where
+
+`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
+
+The indicator function
+
+`in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
+                 (num_upper < 0 || (n-m) <= num_upper)`.
+
+For example:
+
+```
+# if 'input' is [[ 0, 1, 2, 3]
+ [-1, 0, 1, 2]
+ [-2, -1, 0, 1]
+ [-3, -2, -1, 0]],
+
+tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3]
+ [-1, 0, 1, 2]
+ [ 0, -1, 0, 1]
+ [ 0, 0, -1, 0]],
+
+tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0]
+ [-1, 0, 1, 0]
+ [-2, -1, 0, 1]
+ [ 0, -2, -1, 0]]
+```
+
+Useful special cases:
+
+```
+ tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
+ tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
+ tf.matrix_band_part(input, 0, 0) ==> Diagonal.
+```
+END
+}
+op {
+ graph_op_name: "MatrixDeterminant"
+ endpoint {
+ name: "MatrixDeterminant"
+ }
+ summary: "Computes the determinant of one or more square matrices."
+ description: <<END
+The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+form square matrices. The output is a tensor containing the determinants
+for all input submatrices `[..., :, :]`.
+END
+}
+op {
+ graph_op_name: "MatrixDiag"
+ endpoint {
+ name: "MatrixDiag"
+ }
+ summary: "Returns a batched diagonal tensor with a given batched diagonal values."
+ description: <<END
+Given a `diagonal`, this operation returns a tensor with the `diagonal` and
+everything else padded with zeros. The diagonal is computed as follows:
+
+Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
+tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
+
+`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
+
+For example:
+
+```
+# 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
+
+and diagonal.shape = (2, 4)
+
+tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
+ [0, 2, 0, 0]
+ [0, 0, 3, 0]
+ [0, 0, 0, 4]],
+ [[5, 0, 0, 0]
+ [0, 6, 0, 0]
+ [0, 0, 7, 0]
+ [0, 0, 0, 8]]]
+
+which has shape (2, 4, 4)
+```
+END
+}
+op {
+ graph_op_name: "MatrixDiagPart"
+ endpoint {
+ name: "MatrixDiagPart"
+ }
+ summary: "Returns the batched diagonal part of a batched tensor."
+ description: <<END
+This operation returns a tensor with the `diagonal` part
+of the batched `input`. The `diagonal` part is computed as follows:
+
+Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
+tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
+
+`diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
+
+The input must be at least a matrix.
+
+For example:
+
+```
+# 'input' is [[[1, 0, 0, 0]
+ [0, 2, 0, 0]
+ [0, 0, 3, 0]
+ [0, 0, 0, 4]],
+ [[5, 0, 0, 0]
+ [0, 6, 0, 0]
+ [0, 0, 7, 0]
+ [0, 0, 0, 8]]]
+
+and input.shape = (2, 4, 4)
+
+tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
+
+which has shape (2, 4)
+```
+END
+}
+op {
+ graph_op_name: "MatrixInverse"
+ endpoint {
+ name: "MatrixInverse"
+ }
+ summary: "Computes the inverse of one or more square invertible matrices or their"
+ description: <<END
+adjoints (conjugate transposes).
+
+The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+form square matrices. The output is a tensor of the same shape as the input
+containing the inverse for all input submatrices `[..., :, :]`.
+
+The op uses LU decomposition with partial pivoting to compute the inverses.
+
+If a matrix is not invertible there is no guarantee what the op does. It
+may detect the condition and raise an exception or it may simply return a
+garbage result.
+END
+}
+op {
+ graph_op_name: "MatrixSetDiag"
+ endpoint {
+ name: "MatrixSetDiag"
+ }
+ summary: "Returns a batched matrix tensor with new batched diagonal values."
+ description: <<END
+Given `input` and `diagonal`, this operation returns a tensor with the
+same shape and values as `input`, except for the main diagonal of the
+innermost matrices. These will be overwritten by the values in `diagonal`.
+
+The output is computed as follows:
+
+Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
+`k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a
+tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
+
+ * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
+ * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
+END
+}
+op {
+ graph_op_name: "MatrixSolve"
+ endpoint {
+ name: "MatrixSolve"
+ }
+ summary: "Solves systems of linear equations."
+ description: <<END
+`Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is
+a tensor of shape `[..., M, K]`. If `adjoint` is `False` then each output matrix
+satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
+If `adjoint` is `True` then each output matrix satisfies
+`adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
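+
+For example (illustrative values): with `matrix = [[2., 0.], [0., 4.]]`,
+`rhs = [[2.], [8.]]`, and `adjoint = False`, the output is `[[1.], [2.]]`.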
+END
+}
+op {
+ graph_op_name: "MatrixSolveLs"
+ endpoint {
+ name: "MatrixSolveLs"
+ }
+ summary: "Solves one or more linear least-squares problems."
+ description: <<END
+`matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
+form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same
+type as `matrix` and shape `[..., M, K]`.
+The output is a tensor of shape `[..., N, K]` where each output matrix solves
+each of the equations
+`matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`
+in the least squares sense.
+
+We use the following notation for (complex) matrix and right-hand sides
+in the batch:
+
+`matrix`=\\(A \in \mathbb{C}^{m \times n}\\),
+`rhs`=\\(B \in \mathbb{C}^{m \times k}\\),
+`output`=\\(X \in \mathbb{C}^{n \times k}\\),
+`l2_regularizer`=\\(\lambda \in \mathbb{R}\\).
+
+If `fast` is `True`, then the solution is computed by solving the normal
+equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
+\\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares
+problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 +
+\lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
+\\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
+minimum-norm solution to the under-determined linear system, i.e.
+\\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\),
+subject to \\(A Z = B\\). Notice that the fast path is only numerically stable
+when \\(A\\) is numerically full rank and has a condition number
+\\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is
+sufficiently large.
+
+If `fast` is `False` an algorithm based on the numerically robust complete
+orthogonal decomposition is used. This computes the minimum-norm
+least-squares solution, even when \\(A\\) is rank deficient. This path is
+typically 6-7 times slower than the fast path. If `fast` is `False` then
+`l2_regularizer` is ignored.
+END
+}
+op {
+ graph_op_name: "MatrixTriangularSolve"
+ endpoint {
+ name: "MatrixTriangularSolve"
+ }
+ summary: "Solves systems of linear equations with upper or lower triangular matrices by"
+ description: <<END
+backsubstitution.
+
+`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
+square matrices. If `lower` is `True` then the strictly upper triangular part
+of each inner-most matrix is assumed to be zero and not accessed.
+If `lower` is `False` then the strictly lower triangular part of each inner-most
+matrix is assumed to be zero and not accessed.
+`rhs` is a tensor of shape `[..., M, K]`.
+
+The output is a tensor of shape `[..., M, K]`. If `adjoint` is
+`False` then the innermost matrices in `output` satisfy matrix equations
+`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
+If `adjoint` is `True` then the innermost matrices in
+`output` satisfy matrix equations
+`adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
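+
+For example (illustrative values): with `lower = True`, `adjoint = False`,
+`matrix = [[2., 0.], [1., 1.]]`, and `rhs = [[2.], [3.]]`, backsubstitution
+gives the output `[[1.], [2.]]`.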
+END
+}
+op {
+ graph_op_name: "Max"
+ endpoint {
+ name: "Max"
+ }
+ summary: "Computes the maximum of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
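+
+For example (illustrative values):
+
+```
+# 'input' is [[1, 2], [3, 4]]
+max(input, reduction_indices=[1]) ==> [2, 4]
+max(input, reduction_indices=[1], keep_dims=True) ==> [[2], [4]]
+```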
+END
+}
+op {
+ graph_op_name: "MaxPool"
+ endpoint {
+ name: "MaxPool"
+ }
+ summary: "Performs max pooling on the input."
+}
+op {
+ graph_op_name: "MaxPool3D"
+ endpoint {
+ name: "MaxPool3D"
+ }
+ summary: "Performs 3D max pooling on the input."
+}
+op {
+ graph_op_name: "MaxPool3DGrad"
+ endpoint {
+ name: "MaxPool3DGrad"
+ }
+ summary: "Computes gradients of max pooling function."
+}
+op {
+ graph_op_name: "MaxPool3DGradGrad"
+ endpoint {
+ name: "MaxPool3DGradGrad"
+ }
+ summary: "Computes second-order gradients of the maxpooling function."
+}
+op {
+ graph_op_name: "MaxPoolGrad"
+ endpoint {
+ name: "MaxPoolGrad"
+ }
+ summary: "Computes gradients of the maxpooling function."
+}
+op {
+ graph_op_name: "MaxPoolGradGrad"
+ endpoint {
+ name: "MaxPoolGradGrad"
+ }
+ summary: "Computes second-order gradients of the maxpooling function."
+}
+op {
+ graph_op_name: "MaxPoolGradGradV2"
+ endpoint {
+ name: "MaxPoolGradGradV2"
+ }
+ summary: "Computes second-order gradients of the maxpooling function."
+}
+op {
+ graph_op_name: "MaxPoolGradGradWithArgmax"
+ endpoint {
+ name: "MaxPoolGradGradWithArgmax"
+ }
+ summary: "Computes second-order gradients of the maxpooling function."
+}
+op {
+ graph_op_name: "MaxPoolGradV2"
+ endpoint {
+ name: "MaxPoolGradV2"
+ }
+ summary: "Computes gradients of the maxpooling function."
+}
+op {
+ graph_op_name: "MaxPoolGradWithArgmax"
+ endpoint {
+ name: "MaxPoolGradWithArgmax"
+ }
+ summary: "Computes gradients of the maxpooling function."
+}
+op {
+ graph_op_name: "MaxPoolV2"
+ endpoint {
+ name: "MaxPoolV2"
+ }
+ summary: "Performs max pooling on the input."
+}
+op {
+ graph_op_name: "MaxPoolWithArgmax"
+ endpoint {
+ name: "MaxPoolWithArgmax"
+ }
+ summary: "Performs max pooling on the input and outputs both max values and indices."
+ description: <<END
+The indices in `argmax` are flattened, so that a maximum value at position
+`[b, y, x, c]` becomes flattened index
+`((b * height + y) * width + x) * channels + c`.
+
+The indices returned are always in `[0, height) x [0, width)` before flattening,
+even if padding is involved and the mathematically correct answer is outside
+(either negative or too large). This is a bug, but fixing it is difficult to do
+in a safe backwards compatible way, especially due to flattening.
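+
+For example (illustrative arithmetic): with `height = 4`, `width = 5`, and
+`channels = 3`, a maximum at position `[b=0, y=1, x=2, c=0]` is reported as
+flattened index `((0 * 4 + 1) * 5 + 2) * 3 + 0 = 21`.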
+END
+}
+op {
+ graph_op_name: "Maximum"
+ endpoint {
+ name: "Maximum"
+ }
+ summary: "Returns the max of x and y (i.e. x > y ? x : y) element-wise."
+ description: <<END
+*NOTE*: `Maximum` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "Mean"
+ endpoint {
+ name: "Mean"
+ }
+ summary: "Computes the mean of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
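+
+For example (illustrative values): for `input = [[1., 2.], [3., 4.]]` and
+`reduction_indices = [1]`, the output is `[1.5, 3.5]`.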
+END
+}
+op {
+ graph_op_name: "Merge"
+ endpoint {
+ name: "Merge"
+ }
+ summary: "Forwards the value of an available tensor from `inputs` to `output`."
+ description: <<END
+`Merge` waits for at least one of the tensors in `inputs` to become available.
+It is usually combined with `Switch` to implement branching.
+
+`Merge` forwards the first tensor to become available to `output`, and sets
+`value_index` to its index in `inputs`.
+END
+}
+op {
+ graph_op_name: "MergeSummary"
+ endpoint {
+ name: "MergeSummary"
+ }
+ summary: "Merges summaries."
+ description: <<END
+This op creates a
+[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+protocol buffer that contains the union of all the values in the input
+summaries.
+
+When the Op is run, it reports an `InvalidArgument` error if multiple values
+in the summaries to merge use the same tag.
+END
+}
+op {
+ graph_op_name: "MergeV2Checkpoints"
+ endpoint {
+ name: "MergeV2Checkpoints"
+ }
+ summary: "V2 format specific: merges the metadata files of sharded checkpoints. The"
+ description: <<END
+result is one logical checkpoint, with one physical metadata file and renamed
+data files.
+
+Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
+
+If delete_old_dirs is true, attempts to delete recursively the dirname of each
+path in the input checkpoint_prefixes. This is useful when those paths are
+non-user-facing temporary locations.
+END
+}
+op {
+ graph_op_name: "Mfcc"
+ endpoint {
+ name: "Mfcc"
+ }
+ summary: "Transforms a spectrogram into a form that\'s useful for speech recognition."
+ description: <<END
+Mel Frequency Cepstral Coefficients are a way of representing audio data that's
+been effective as an input feature for machine learning. They are created by
+taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
+higher frequencies that are less significant to the human ear. They have a long
+history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
+is a good resource to learn more.
+END
+}
+op {
+ graph_op_name: "Min"
+ endpoint {
+ name: "Min"
+ }
+ summary: "Computes the minimum of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
+END
+}
+op {
+ graph_op_name: "Minimum"
+ endpoint {
+ name: "Minimum"
+ }
+ summary: "Returns the min of x and y (i.e. x < y ? x : y) element-wise."
+ description: <<END
+*NOTE*: `Minimum` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "MirrorPad"
+ endpoint {
+ name: "MirrorPad"
+ }
+ summary: "Pads a tensor with mirrored values."
+ description: <<END
+This operation pads `input` with mirrored values according to the `paddings`
+you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
+the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
+how many values to add before the contents of `input` in that dimension, and
+`paddings[D, 1]` indicates how many values to add after the contents of `input`
+in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no
+greater than `input.dim_size(D)` if `copy_border` is true (or no greater than
+`input.dim_size(D) - 1` if it is false).
+
+The padded size of each dimension D of the output is:
+
+`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
+
+For example:
+
+```
+# 't' is [[1, 2, 3], [4, 5, 6]].
+# 'paddings' is [[1, 1], [2, 2]].
+# 'mode' is SYMMETRIC.
+# rank of 't' is 2.
+pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
+ [2, 1, 1, 2, 3, 3, 2]
+ [5, 4, 4, 5, 6, 6, 5]
+ [5, 4, 4, 5, 6, 6, 5]]
+```
+END
+}
+op {
+ graph_op_name: "MirrorPadGrad"
+ endpoint {
+ name: "MirrorPadGrad"
+ }
+ summary: "Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor."
+ description: <<END
+This operation folds the padded areas of `input` by `MirrorPad` according to the
+`paddings` you specify. `paddings` must be the same as `paddings` argument
+given to the corresponding `MirrorPad` op.
+
+The folded size of each dimension D of the output is:
+
+`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
+
+For example:
+
+```
+# 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
+# 'paddings' is [[0, 1], [0, 1]].
+# 'mode' is SYMMETRIC.
+# rank of 't' is 2.
+pad(t, paddings) ==> [[ 1, 5]
+ [11, 28]]
+```
+END
+}
+op {
+ graph_op_name: "Mod"
+ endpoint {
+ name: "Mod"
+ }
+ summary: "Returns element-wise remainder of division. This emulates C semantics in that"
+ description: <<END
+the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
+y + truncate_mod(x, y) = x`.
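+
+For example (illustrative values): `mod(7, 5) = 2` and `mod(-7, 5) = -2`,
+since truncating division rounds toward zero (`truncate(-7 / 5) = -1` and
+`-1 * 5 + (-2) = -7`).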
+
+*NOTE*: `Mod` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "Mul"
+ endpoint {
+ name: "Mul"
+ }
+ summary: "Returns x * y element-wise."
+ description: <<END
+*NOTE*: `Mul` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "Multinomial"
+ endpoint {
+ name: "Multinomial"
+ }
+ summary: "Draws samples from a multinomial distribution."
+}
+op {
+ graph_op_name: "MutableDenseHashTable"
+ endpoint {
+ name: "MutableDenseHashTable"
+ }
+ summary: "Creates an empty hash table that uses tensors as the backing store."
+ description: <<END
+It uses "open addressing" with quadratic reprobing to resolve
+collisions.
+
+This op creates a mutable hash table, specifying the type of its keys and
+values. Each value must be a scalar. Data can be inserted into the table using
+the insert operations. It does not support the initialization operation.
+END
+}
+op {
+ graph_op_name: "MutableDenseHashTableV2"
+ endpoint {
+ name: "MutableDenseHashTableV2"
+ }
+ summary: "Creates an empty hash table that uses tensors as the backing store."
+ description: <<END
+It uses "open addressing" with quadratic reprobing to resolve
+collisions.
+
+This op creates a mutable hash table, specifying the type of its keys and
+values. Each value must be a scalar. Data can be inserted into the table using
+the insert operations. It does not support the initialization operation.
+END
+}
+op {
+ graph_op_name: "MutableHashTable"
+ endpoint {
+ name: "MutableHashTable"
+ }
+ summary: "Creates an empty hash table."
+ description: <<END
+This op creates a mutable hash table, specifying the type of its keys and
+values. Each value must be a scalar. Data can be inserted into the table using
+the insert operations. It does not support the initialization operation.
+END
+}
+op {
+ graph_op_name: "MutableHashTableOfTensors"
+ endpoint {
+ name: "MutableHashTableOfTensors"
+ }
+ summary: "Creates an empty hash table."
+ description: <<END
+This op creates a mutable hash table, specifying the type of its keys and
+values. Each value must be a vector. Data can be inserted into the table using
+the insert operations. It does not support the initialization operation.
+END
+}
+op {
+ graph_op_name: "MutableHashTableOfTensorsV2"
+ endpoint {
+ name: "MutableHashTableOfTensorsV2"
+ }
+ summary: "Creates an empty hash table."
+ description: <<END
+This op creates a mutable hash table, specifying the type of its keys and
+values. Each value must be a vector. Data can be inserted into the table using
+the insert operations. It does not support the initialization operation.
+END
+}
+op {
+ graph_op_name: "MutableHashTableV2"
+ endpoint {
+ name: "MutableHashTableV2"
+ }
+ summary: "Creates an empty hash table."
+ description: <<END
+This op creates a mutable hash table, specifying the type of its keys and
+values. Each value must be a scalar. Data can be inserted into the table using
+the insert operations. It does not support the initialization operation.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_N.pbtxt b/tensorflow/core/api_def/base_api/api_def_N.pbtxt
new file mode 100644
index 0000000000..0298a42cab
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_N.pbtxt
@@ -0,0 +1,94 @@
+op {
+ graph_op_name: "Neg"
+ endpoint {
+ name: "Neg"
+ }
+ summary: "Computes numerical negative value element-wise."
+ description: <<END
+I.e., \\(y = -x\\).
+END
+}
+op {
+ graph_op_name: "NegTrain"
+ endpoint {
+ name: "NegTrain"
+ }
+ summary: "Training via negative sampling."
+}
+op {
+ graph_op_name: "NextIteration"
+ endpoint {
+ name: "NextIteration"
+ }
+ summary: "Makes its input available to the next iteration."
+}
+op {
+ graph_op_name: "NoOp"
+ endpoint {
+ name: "NoOp"
+ }
+ summary: "Does nothing. Only useful as a placeholder for control edges."
+}
+op {
+ graph_op_name: "NonMaxSuppression"
+ endpoint {
+ name: "NonMaxSuppression"
+ }
+ summary: "Greedily selects a subset of bounding boxes in descending order of score,"
+ description: <<END
+pruning away boxes that have high intersection-over-union (IOU) overlap
+with previously selected boxes. Bounding boxes are supplied as
+[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
+diagonal pair of box corners and the coordinates can be provided as normalized
+(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
+is agnostic to where the origin is in the coordinate system. Note that this
+algorithm is invariant to orthogonal transformations and translations
+of the coordinate system; thus translations or reflections of the coordinate
+system result in the same boxes being selected by the algorithm.
+The output of this operation is a set of integers indexing into the input
+collection of bounding boxes representing the selected boxes. The bounding
+box coordinates corresponding to the selected indices can then be obtained
+using the `tf.gather` operation. For example:
+ selected_indices = tf.image.non_max_suppression(
+ boxes, scores, max_output_size, iou_threshold)
+ selected_boxes = tf.gather(boxes, selected_indices)
+END
+}
+op {
+ graph_op_name: "NonMaxSuppressionV2"
+ endpoint {
+ name: "NonMaxSuppressionV2"
+ }
+ summary: "Greedily selects a subset of bounding boxes in descending order of score,"
+ description: <<END
+pruning away boxes that have high intersection-over-union (IOU) overlap
+with previously selected boxes. Bounding boxes are supplied as
+[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
+diagonal pair of box corners and the coordinates can be provided as normalized
+(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
+is agnostic to where the origin is in the coordinate system. Note that this
+algorithm is invariant to orthogonal transformations and translations
+of the coordinate system; thus translations or reflections of the coordinate
+system result in the same boxes being selected by the algorithm.
+
+The output of this operation is a set of integers indexing into the input
+collection of bounding boxes representing the selected boxes. The bounding
+box coordinates corresponding to the selected indices can then be obtained
+using the `tf.gather` operation. For example:
+
+ selected_indices = tf.image.non_max_suppression_v2(
+ boxes, scores, max_output_size, iou_threshold)
+ selected_boxes = tf.gather(boxes, selected_indices)
+END
+}
+op {
+ graph_op_name: "NotEqual"
+ endpoint {
+ name: "NotEqual"
+ }
+ summary: "Returns the truth value of (x != y) element-wise."
+ description: <<END
+*NOTE*: `NotEqual` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_O.pbtxt b/tensorflow/core/api_def/base_api/api_def_O.pbtxt
new file mode 100644
index 0000000000..3c62335da9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_O.pbtxt
@@ -0,0 +1,195 @@
+op {
+ graph_op_name: "OneHot"
+ endpoint {
+ name: "OneHot"
+ }
+ summary: "Returns a one-hot tensor."
+ description: <<END
+The locations represented by indices in `indices` take value `on_value`,
+while all other locations take value `off_value`.
+
+If the input `indices` is rank `N`, the output will have rank `N+1`,
+The new axis is created at dimension `axis` (default: the new axis is
+appended at the end).
+
+If `indices` is a scalar the output shape will be a vector of length `depth`.
+
+If `indices` is a vector of length `features`, the output shape will be:
+```
+ features x depth if axis == -1
+ depth x features if axis == 0
+```
+
+If `indices` is a matrix (batch) with shape `[batch, features]`,
+the output shape will be:
+```
+ batch x features x depth if axis == -1
+ batch x depth x features if axis == 1
+ depth x batch x features if axis == 0
+```
+
+
+Examples
+=========
+
+Suppose that
+
+```
+ indices = [0, 2, -1, 1]
+ depth = 3
+ on_value = 5.0
+ off_value = 0.0
+ axis = -1
+```
+
+Then output is `[4 x 3]`:
+
+ ```output =
+ [5.0 0.0 0.0] // one_hot(0)
+ [0.0 0.0 5.0] // one_hot(2)
+ [0.0 0.0 0.0] // one_hot(-1)
+ [0.0 5.0 0.0] // one_hot(1)
+ ```
+
+Suppose that
+
+```
+ indices = [0, 2, -1, 1]
+ depth = 3
+ on_value = 0.0
+ off_value = 3.0
+ axis = 0
+```
+
+Then output is `[3 x 4]`:
+
+ ```output =
+ [0.0 3.0 3.0 3.0]
+ [3.0 3.0 3.0 0.0]
+ [3.0 0.0 3.0 3.0]
+ //  ^             one_hot(0)
+ //      ^         one_hot(2)
+ //          ^     one_hot(-1)
+ //              ^ one_hot(1)
+ ```
+
+Suppose that
+
+```
+ indices = [[0, 2], [1, -1]]
+ depth = 3
+ on_value = 1.0
+ off_value = 0.0
+ axis = -1
+```
+
+Then output is `[2 x 2 x 3]`:
+
+ ```output =
+ [
+ [1.0, 0.0, 0.0] // one_hot(0)
+ [0.0, 0.0, 1.0] // one_hot(2)
+ ][
+ [0.0, 1.0, 0.0] // one_hot(1)
+ [0.0, 0.0, 0.0] // one_hot(-1)
+ ]```
+END
+}
+op {
+ graph_op_name: "OneShotIterator"
+ endpoint {
+ name: "OneShotIterator"
+ }
+ summary: "Makes a \"one-shot\" iterator that can be iterated only once."
+ description: <<END
+A one-shot iterator bundles the logic for defining the dataset and
+the state of the iterator in a single op, which allows simple input
+pipelines to be defined without an additional initialization
+("MakeIterator") step.
+
+One-shot iterators have the following limitations:
+
+* They do not support parameterization: all logic for creating the underlying
+ dataset must be bundled in the `dataset_factory` function.
+* They are not resettable. Once a one-shot iterator reaches the end of its
+ underlying dataset, subsequent "IteratorGetNext" operations on that
+ iterator will always produce an `OutOfRange` error.
+
+For greater flexibility, use "Iterator" and "MakeIterator" to define
+an iterator using an arbitrary subgraph, which may capture tensors
+(including fed values) as parameters, and which may be reset multiple
+times by rerunning "MakeIterator".
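+
+For example, a minimal sketch using the `tf.data` Python wrapper, which builds
+on this op (names such as `make_one_shot_iterator` are the 1.x-style API):
+
+```python
+import tensorflow as tf
+
+# The dataset definition is captured by the one-shot iterator itself,
+# so no separate "MakeIterator" / initializer step is needed.
+dataset = tf.data.Dataset.range(5).map(lambda x: x * 2)
+iterator = dataset.make_one_shot_iterator()
+next_element = iterator.get_next()
+
+with tf.Session() as sess:
+  # Produces 0, 2, 4, 6, 8 and then raises OutOfRangeError.
+  while True:
+    try:
+      print(sess.run(next_element))
+    except tf.errors.OutOfRangeError:
+      break
+```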
+END
+}
+op {
+ graph_op_name: "OnesLike"
+ endpoint {
+ name: "OnesLike"
+ }
+ summary: "Returns a tensor of ones with the same shape and type as x."
+}
+op {
+ graph_op_name: "OrderedMapClear"
+ endpoint {
+ name: "OrderedMapClear"
+ }
+ summary: "Op removes all elements in the underlying container."
+}
+op {
+ graph_op_name: "OrderedMapIncompleteSize"
+ endpoint {
+ name: "OrderedMapIncompleteSize"
+ }
+ summary: "Op returns the number of incomplete elements in the underlying container."
+}
+op {
+ graph_op_name: "OrderedMapPeek"
+ endpoint {
+ name: "OrderedMapPeek"
+ }
+ summary: "Op peeks at the values at the specified key. If the"
+ description: <<END
+underlying container does not contain this key,
+this op will block until it does. This Op is optimized for
+performance.
+END
+}
+op {
+ graph_op_name: "OrderedMapSize"
+ endpoint {
+ name: "OrderedMapSize"
+ }
+ summary: "Op returns the number of elements in the underlying container."
+}
+op {
+ graph_op_name: "OrderedMapStage"
+ endpoint {
+ name: "OrderedMapStage"
+ }
+ summary: "Stage (key, values) in the underlying container which behaves like a ordered"
+ description: <<END
+associative container. Elements are ordered by key.
+END
+}
+op {
+ graph_op_name: "OrderedMapUnstage"
+ endpoint {
+ name: "OrderedMapUnstage"
+ }
+ summary: "Op removes and returns the values associated with the key"
+ description: <<END
+from the underlying container. If the underlying container
+does not contain this key, the op will block until it does.
+END
+}
+op {
+ graph_op_name: "OrderedMapUnstageNoKey"
+ endpoint {
+ name: "OrderedMapUnstageNoKey"
+ }
+ summary: "Op removes and returns the (key, value) element with the smallest"
+ description: <<END
+key from the underlying container. If the underlying container
+does not contain elements, the op will block until it does.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_P.pbtxt b/tensorflow/core/api_def/base_api/api_def_P.pbtxt
new file mode 100644
index 0000000000..a3abb079e9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_P.pbtxt
@@ -0,0 +1,431 @@
+op {
+ graph_op_name: "Pack"
+ endpoint {
+ name: "Pack"
+ }
+ summary: "Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor."
+ description: <<END
+Packs the `N` tensors in `values` into a tensor with rank one higher than each
+tensor in `values`, by packing them along the `axis` dimension.
+Given a list of tensors of shape `(A, B, C)`;
+
+if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
+if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
+Etc.
+
+For example:
+
+```
+# 'x' is [1, 4]
+# 'y' is [2, 5]
+# 'z' is [3, 6]
+pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
+pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
+```
+
+This is the opposite of `unpack`.
+END
+}
+op {
+ graph_op_name: "Pad"
+ endpoint {
+ name: "Pad"
+ }
+ summary: "Pads a tensor with zeros."
+ description: <<END
+This operation pads `input` with zeros according to the `paddings` you
+specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
+rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
+how many zeros to add before the contents of `input` in that dimension, and
+`paddings[D, 1]` indicates how many zeros to add after the contents of `input`
+in that dimension.
+
+The padded size of each dimension D of the output is:
+
+`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
+
+For example:
+
+```
+# 't' is [[1, 1], [2, 2]]
+# 'paddings' is [[1, 1], [2, 2]]
+# rank of 't' is 2
+pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
+ [0, 0, 1, 1, 0, 0]
+ [0, 0, 2, 2, 0, 0]
+ [0, 0, 0, 0, 0, 0]]
+```
+END
+}
+op {
+ graph_op_name: "PadV2"
+ endpoint {
+ name: "PadV2"
+ }
+ summary: "Pads a tensor."
+ description: <<END
+This operation pads `input` according to the `paddings` and `constant_values`
+you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
+the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
+how many padding values to add before the contents of `input` in that dimension,
+and `paddings[D, 1]` indicates how many padding values to add after the contents
+of `input` in that dimension. `constant_values` is a scalar tensor of the same
+type as `input` that indicates the value to use for padding `input`.
+
+The padded size of each dimension D of the output is:
+
+`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
+
+For example:
+
+```
+# 't' is [[1, 1], [2, 2]]
+# 'paddings' is [[1, 1], [2, 2]]
+# 'constant_values' is 0
+# rank of 't' is 2
+pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
+ [0, 0, 1, 1, 0, 0]
+ [0, 0, 2, 2, 0, 0]
+ [0, 0, 0, 0, 0, 0]]
+```
+END
+}
+op {
+ graph_op_name: "PaddedBatchDataset"
+ endpoint {
+ name: "PaddedBatchDataset"
+ }
+ summary: "Creates a dataset that batches and pads `batch_size` elements from the input."
+}
+op {
+ graph_op_name: "PaddingFIFOQueue"
+ endpoint {
+ name: "PaddingFIFOQueue"
+ }
+ summary: "A queue that produces elements in first-in first-out order."
+ description: <<END
+Variable-size shapes are allowed by setting the corresponding shape dimensions
+to 0 in the shape attr. In this case DequeueMany will pad up to the maximum
+size of any given element in the minibatch. See below for details.
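+
+For example, a minimal sketch using the `tf.PaddingFIFOQueue` Python wrapper
+(the variable-size dimension is padded with zeros on DequeueMany):
+
+```python
+import tensorflow as tf
+
+# One integer-vector component whose length is variable (None).
+queue = tf.PaddingFIFOQueue(capacity=10, dtypes=[tf.int32], shapes=[[None]])
+enqueue_a = queue.enqueue([[1, 2, 3]])
+enqueue_b = queue.enqueue([[4]])
+dequeue_batch = queue.dequeue_many(2)  # shorter elements are padded with zeros
+
+with tf.Session() as sess:
+  sess.run([enqueue_a, enqueue_b])
+  print(sess.run(dequeue_batch))  # [[1 2 3] [4 0 0]]
+```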
+END
+}
+op {
+ graph_op_name: "PaddingFIFOQueueV2"
+ endpoint {
+ name: "PaddingFIFOQueueV2"
+ }
+ summary: "A queue that produces elements in first-in first-out order."
+ description: <<END
+Variable-size shapes are allowed by setting the corresponding shape dimensions
+to 0 in the shape attr. In this case DequeueMany will pad up to the maximum
+size of any given element in the minibatch. See below for details.
+END
+}
+op {
+ graph_op_name: "ParallelConcat"
+ endpoint {
+ name: "ParallelConcat"
+ }
+ summary: "Concatenates a list of `N` tensors along the first dimension."
+ description: <<END
+The input tensors are all required to have size 1 in the first dimension.
+
+For example:
+
+```
+# 'x' is [[1, 4]]
+# 'y' is [[2, 5]]
+# 'z' is [[3, 6]]
+parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
+```
+
+The difference between concat and parallel_concat is that concat requires all
+of the inputs to be computed before the operation will begin, but doesn't require
+that the input shapes be known during graph construction. Parallel concat
+will copy pieces of the input into the output as they become available; in
+some situations this can provide a performance benefit.
+END
+}
+op {
+ graph_op_name: "ParallelDynamicStitch"
+ endpoint {
+ name: "ParallelDynamicStitch"
+ }
+ summary: "Interleave the values from the `data` tensors into a single tensor."
+ description: <<END
+Builds a merged tensor such that
+
+```python
+ merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
+```
+
+For example, if each `indices[m]` is scalar or vector, we have
+
+```python
+ # Scalar indices:
+ merged[indices[m], ...] = data[m][...]
+
+ # Vector indices:
+ merged[indices[m][i], ...] = data[m][i, ...]
+```
+
+Each `data[i].shape` must start with the corresponding `indices[i].shape`,
+and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
+must have `data[i].shape = indices[i].shape + constant`. In terms of this
+`constant`, the output shape is
+
+ merged.shape = [max(indices)] + constant
+
+Values may be merged in parallel, so if an index appears in both `indices[m][i]`
+and `indices[n][j]`, the result may be invalid. This differs from the normal
+DynamicStitch operator that defines the behavior in that case.
+
+For example:
+
+```python
+ indices[0] = 6
+ indices[1] = [4, 1]
+ indices[2] = [[5, 2], [0, 3]]
+ data[0] = [61, 62]
+ data[1] = [[41, 42], [11, 12]]
+ data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
+ merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
+ [51, 52], [61, 62]]
+```
+
+This method can be used to merge partitions created by `dynamic_partition`
+as illustrated in the following example:
+
+```python
+ # Apply a function (increments x_i) to elements for which a certain condition
+ # applies (x_i != -1 in this example).
+ x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
+ condition_mask = tf.not_equal(x, tf.constant(-1.))
+ partitioned_data = tf.dynamic_partition(
+     x, tf.cast(condition_mask, tf.int32), 2)
+ partitioned_data[1] = partitioned_data[1] + 1.0
+ condition_indices = tf.dynamic_partition(
+     tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
+ x = tf.dynamic_stitch(condition_indices, partitioned_data)
+ # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
+ # unchanged.
+```
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
+</div>
+END
+}
+op {
+ graph_op_name: "ParallelMapDataset"
+ endpoint {
+ name: "ParallelMapDataset"
+ }
+ summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
+ description: <<END
+Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up
+to `num_parallel_calls` copies of `f` in parallel.
+END
+}
+op {
+ graph_op_name: "ParameterizedTruncatedNormal"
+ endpoint {
+ name: "ParameterizedTruncatedNormal"
+ }
+ summary: "Outputs random values from a normal distribution. The parameters may each be a"
+ description: <<END
+scalar which applies to the entire output, or a vector of length shape[0] which
+stores the parameters for each batch.
+END
+}
+op {
+ graph_op_name: "ParseExample"
+ endpoint {
+ name: "ParseExample"
+ }
+ summary: "Transforms a vector of brain.Example protos (as strings) into typed tensors."
+}
+op {
+ graph_op_name: "ParseSingleSequenceExample"
+ endpoint {
+ name: "ParseSingleSequenceExample"
+ }
+ summary: "Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors."
+}
+op {
+ graph_op_name: "ParseTensor"
+ endpoint {
+ name: "ParseTensor"
+ }
+ summary: "Transforms a serialized tensorflow.TensorProto proto into a Tensor."
+}
+op {
+ graph_op_name: "Placeholder"
+ endpoint {
+ name: "Placeholder"
+ }
+ summary: "A placeholder op for a value that will be fed into the computation."
+ description: <<END
+N.B. This operation will fail with an error if it is executed. It is
+intended as a way to represent a value that will always be fed, and to
+provide attrs that enable the fed value to be checked at runtime.
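+
+For example, typical usage via the Python wrapper (a minimal sketch):
+
+```python
+import tensorflow as tf
+
+# The value must be supplied through feed_dict at run time.
+x = tf.placeholder(tf.float32, shape=[None, 3])
+y = tf.reduce_sum(x)
+
+with tf.Session() as sess:
+  print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0]]}))  # 6.0
+```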
+END
+}
+op {
+ graph_op_name: "PlaceholderV2"
+ endpoint {
+ name: "PlaceholderV2"
+ }
+ summary: "A placeholder op for a value that will be fed into the computation."
+ description: <<END
+N.B. This operation will fail with an error if it is executed. It is
+intended as a way to represent a value that will always be fed, and to
+provide attrs that enable the fed value to be checked at runtime.
+END
+}
+op {
+ graph_op_name: "PlaceholderWithDefault"
+ endpoint {
+ name: "PlaceholderWithDefault"
+ }
+ summary: "A placeholder op that passes through `input` when its output is not fed."
+}
+op {
+ graph_op_name: "Polygamma"
+ endpoint {
+ name: "Polygamma"
+ }
+ summary: "Compute the polygamma function \\\\(\\psi^{(n)}(x)\\\\)."
+ description: <<END
+The polygamma function is defined as:
+
+\\(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\\)
+
+where \\(\psi(x)\\) is the digamma function.
+END
+}
+op {
+ graph_op_name: "PopulationCount"
+ endpoint {
+ name: "PopulationCount"
+ }
+ summary: "Computes element-wise population count (a.k.a. popcount, bitsum, bitcount)."
+ description: <<END
+For each entry in `x`, calculates the number of `1` (on) bits in the binary
+representation of that entry.
+
+**NOTE**: It is more efficient to first `tf.bitcast` your tensors into
+`int32` or `int64` and perform the bitcount on the result, than to feed in
+8- or 16-bit inputs and then aggregate the resulting counts.
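+
+For example (a small sketch, assuming the `tf.bitwise.population_count`
+Python wrapper):
+
+```
+# 'x' is [0, 1, 2, 3, 8, 255] with dtype uint8
+tf.bitwise.population_count(x) ==> [0, 1, 1, 2, 1, 8]
+```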
+END
+}
+op {
+ graph_op_name: "Pow"
+ endpoint {
+ name: "Pow"
+ }
+ summary: "Computes the power of one value to another."
+ description: <<END
+Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
+corresponding elements in `x` and `y`. For example:
+
+```
+# tensor 'x' is [[2, 2], [3, 3]]
+# tensor 'y' is [[8, 16], [2, 3]]
+tf.pow(x, y) ==> [[256, 65536], [9, 27]]
+```
+END
+}
+op {
+ graph_op_name: "PrefetchDataset"
+ endpoint {
+ name: "PrefetchDataset"
+ }
+ summary: "Creates a dataset that asynchronously prefetches elements from `input_dataset`."
+}
+op {
+ graph_op_name: "PreventGradient"
+ endpoint {
+ name: "PreventGradient"
+ }
+ summary: "An identity op that triggers an error if a gradient is requested."
+ description: <<END
+When executed in a graph, this op outputs its input tensor as-is.
+
+When building ops to compute gradients, the TensorFlow gradient system
+will return an error when trying to look up the gradient of this op,
+because no gradient must ever be registered for this function. This
+op exists to prevent subtle bugs from silently returning unimplemented
+gradients in some corner cases.
+END
+}
+op {
+ graph_op_name: "Print"
+ endpoint {
+ name: "Print"
+ }
+ summary: "Prints a list of tensors."
+ description: <<END
+Passes `input` through to `output` and prints `data` when evaluating.
+END
+}
+op {
+ graph_op_name: "PriorityQueue"
+ endpoint {
+ name: "PriorityQueue"
+ }
+ summary: "A queue that produces elements sorted by the first component value."
+ description: <<END
+Note that the PriorityQueue requires the first component of any element
+to be a scalar int64, in addition to the other elements declared by
+component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
+and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
+entry in their input (resp. output) lists.
+END
+}
+op {
+ graph_op_name: "PriorityQueueV2"
+ endpoint {
+ name: "PriorityQueueV2"
+ }
+ summary: "A queue that produces elements sorted by the first component value."
+ description: <<END
+Note that the PriorityQueue requires the first component of any element
+to be a scalar int64, in addition to the other elements declared by
+component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
+and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
+entry in their input (resp. output) lists.
+END
+}
+op {
+ graph_op_name: "Prod"
+ endpoint {
+ name: "Prod"
+ }
+ summary: "Computes the product of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
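+
+For example, via the `tf.reduce_prod` Python wrapper (a minimal sketch):
+
+```
+# 'x' is [[1, 2], [3, 4]]
+tf.reduce_prod(x) ==> 24
+tf.reduce_prod(x, 0) ==> [3, 8]
+tf.reduce_prod(x, 1) ==> [2, 12]
+```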
+END
+}
+op {
+ graph_op_name: "PyFunc"
+ endpoint {
+ name: "PyFunc"
+ }
+ summary: "Invokes a python function to compute func(input)->output."
+ description: <<END
+This operation is considered stateful. For a stateless version, see
+PyFuncStateless.
+END
+}
+op {
+ graph_op_name: "PyFuncStateless"
+ endpoint {
+ name: "PyFuncStateless"
+ }
+ summary: "A stateless version of PyFunc."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Q.pbtxt b/tensorflow/core/api_def/base_api/api_def_Q.pbtxt
new file mode 100644
index 0000000000..4af60a1841
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Q.pbtxt
@@ -0,0 +1,609 @@
+op {
+ graph_op_name: "Qr"
+ endpoint {
+ name: "Qr"
+ }
+ summary: "Computes the QR decompositions of one or more matrices."
+ description: <<END
+Computes the QR decomposition of each inner matrix in `tensor` such that
+`tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
+
+```python
+# a is a tensor.
+# q is a tensor of orthonormal matrices.
+# r is a tensor of upper triangular matrices.
+q, r = qr(a)
+q_full, r_full = qr(a, full_matrices=True)
+```
+END
+}
+op {
+ graph_op_name: "QuantizeAndDequantize"
+ endpoint {
+ name: "QuantizeAndDequantize"
+ }
+ summary: "Use QuantizeAndDequantizeV2 instead."
+}
+op {
+ graph_op_name: "QuantizeAndDequantizeV2"
+ endpoint {
+ name: "QuantizeAndDequantizeV2"
+ }
+ summary: "Quantizes then dequantizes a tensor."
+ description: <<END
+This op simulates the precision loss from the quantized forward pass by:
+1. Quantizing the tensor to fixed point numbers, which should match the target
+ quantization method when it is used in inference.
+2. Dequantizing it back to floating point numbers for the following ops, most
+ likely matmul.
+
+There are different ways to quantize. This version does not use the full range
+of the output type, choosing to elide the lowest possible value for symmetry
+(e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit
+quantization), so that 0.0 maps to 0.
+
+To perform this op, we first find the range of values in our tensor. The range
+we use is always centered on 0, so we find m such that
+
+1. m = max(abs(input_min), abs(input_max)) if range_given is true,
+2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
+
+Our input tensor range is then [-m, m].
+
+Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].
+If signed_input is true, this is
+
+ [min_fixed, max_fixed] =
+ [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1].
+
+Otherwise, if signed_input is false, the fixed-point range is
+
+ [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
+
+From this we compute our scaling factor, s:
+
+ s = (max_fixed - min_fixed) / (2 * m).
+
+Now we can quantize and dequantize the elements of our tensor. An element e
+is transformed into e':
+
+ e' = (e * s).round_to_nearest() / s.
+
+Note that we have a different number of buckets in the signed vs. unsigned
+cases. For example, if num_bits == 8, we get 254 buckets in the signed case
+vs. 255 in the unsigned case.
+
+For example, suppose num_bits = 8 and m = 1. Then
+
+ [min_fixed, max_fixed] = [-127, 127], and
+ s = (127 + 127) / 2 = 127.
+
+Given the vector {-1, -0.5, 0, 0.3}, this is quantized to
+{-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
+END
+}
+op {
+ graph_op_name: "QuantizeAndDequantizeV3"
+ endpoint {
+ name: "QuantizeAndDequantizeV3"
+ }
+ summary: "Quantizes then dequantizes a tensor."
+ description: <<END
+This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
+tensor, so its value can change during training.
+END
+}
+op {
+ graph_op_name: "QuantizeDownAndShrinkRange"
+ endpoint {
+ name: "QuantizeDownAndShrinkRange"
+ }
+ summary: "Convert the quantized \'input\' tensor into a lower-precision \'output\', using the"
+ description: <<END
+actual distribution of the values to maximize the usage of the lower bit depth
+and adjusting the output min and max ranges accordingly.
+
+[input_min, input_max] are scalar floats that specify the range for the float
+interpretation of the 'input' data. For example, if input_min is -1.0f and
+input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
+value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
+
+This operator tries to squeeze as much precision as possible into an output with
+a lower bit depth by calculating the actual min and max values found in the
+data. For example, maybe that quint16 input has no values lower than 16,384 and
+none higher than 49,152. That means only half the range is actually needed, all
+the float interpretations are between -0.5f and 0.5f, so if we want to compress
+the data into a quint8 output, we can use that range rather than the theoretical
+-1.0f to 1.0f that is suggested by the input min and max.
+
+In practice, this is most useful for taking output from operations like
+QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
+may have large potential output ranges, but in practice have a distribution of
+input values that only uses a small fraction of the possible range. By feeding
+that output into this operator, we can reduce it from 32 bits down to 8 with
+minimal loss of accuracy.
+END
+}
+op {
+ graph_op_name: "QuantizeV2"
+ endpoint {
+ name: "QuantizeV2"
+ }
+ summary: "Quantize the \'input\' tensor of type float to \'output\' tensor of type \'T\'."
+ description: <<END
+[min_range, max_range] are scalar floats that specify the range for
+the 'input' data. The 'mode' attribute controls exactly which calculations are
+used to convert the float values to their quantized equivalents.
+
+In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
+
+```
+out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
+if T == qint8, out[i] -= (range(T) + 1) / 2.0
+```
+here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
+
+*MIN_COMBINED Mode Example*
+
+Assume the input is type float and has a possible range of [0.0, 6.0] and the
+output type is quint8 ([0, 255]). The min_range and max_range values should be
+specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
+value of the input by 255/6 and cast to quint8.
+
+If the output type was qint8 ([-128, 127]), the operation will additionally
+subtract each value by 128 prior to casting, so that the range of values aligns
+with the range of qint8.
+
+If the mode is 'MIN_FIRST', then this approach is used:
+
+```
+number_of_steps = 1 << (# of bits in T)
+range_adjust = number_of_steps / (number_of_steps - 1)
+range = (range_max - range_min) * range_adjust
+range_scale = number_of_steps / range
+quantized = round(input * range_scale) - round(range_min * range_scale) +
+ numeric_limits<T>::min()
+quantized = max(quantized, numeric_limits<T>::min())
+quantized = min(quantized, numeric_limits<T>::max())
+```
+
+The biggest difference between this and MIN_COMBINED is that the minimum range
+is rounded first, before it's subtracted from the rounded value. With
+MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
+and dequantizing will introduce a larger and larger error.
+
+*SCALED mode Example*
+
+`SCALED` mode matches the quantization approach used in
+`QuantizeAndDequantize{V2|V3}`.
+
+If the mode is `SCALED`, we do not use the full range of the output type,
+choosing to elide the lowest possible value for symmetry (e.g., output range is
+-127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
+0.
+
+We first find the range of values in our tensor. The
+range we use is always centered on 0, so we find m such that
+```c++
+ m = max(abs(input_min), abs(input_max))
+```
+
+Our input tensor range is then `[-m, m]`.
+
+Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
+If T is signed, this is
+```
+ num_bits = sizeof(T) * 8
+ [min_fixed, max_fixed] =
+ [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1]
+```
+
+Otherwise, if T is unsigned, the fixed-point range is
+```
+ [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
+```
+
+From this we compute our scaling factor, s:
+```c++
+ s = (max_fixed - min_fixed) / (2 * m)
+```
+
+Now we can quantize the elements of our tensor:
+```c++
+result = (input * s).round_to_nearest()
+```
+
+One thing to watch out for is that the operator may choose to adjust the
+requested minimum and maximum values slightly during the quantization process,
+so you should always use the output ports as the range for further calculations.
+For example, if the requested minimum and maximum values are close to equal,
+they will be separated by a small epsilon value to prevent ill-formed quantized
+buffers from being created. Otherwise, you can end up with buffers where all the
+quantized values map to the same float value, which causes problems for
+operations that have to perform further calculations on them.
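+
+As a plain-Python illustration of the MIN_COMBINED formula above (not the
+kernel implementation), with min_range = 0.0, max_range = 6.0 and a quint8
+output type:
+
+```python
+min_range, max_range = 0.0, 6.0
+range_T = 255.0  # numeric_limits<quint8>::max() - numeric_limits<quint8>::min()
+
+for v in [0.0, 3.0, 6.0]:
+  scaled = (v - min_range) * range_T / (max_range - min_range)
+  print(v, '->', scaled)  # 0.0 -> 0.0, 3.0 -> 127.5, 6.0 -> 255.0 (then cast to quint8)
+```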
+END
+}
+op {
+ graph_op_name: "QuantizedAdd"
+ endpoint {
+ name: "QuantizedAdd"
+ }
+ summary: "Returns x + y element-wise, working on quantized buffers."
+}
+op {
+ graph_op_name: "QuantizedAvgPool"
+ endpoint {
+ name: "QuantizedAvgPool"
+ }
+ summary: "Produces the average pool of the input tensor for quantized types."
+}
+op {
+ graph_op_name: "QuantizedBatchNormWithGlobalNormalization"
+ endpoint {
+ name: "QuantizedBatchNormWithGlobalNormalization"
+ }
+ summary: "Quantized Batch normalization."
+ description: <<END
+This op is deprecated and will be removed in the future. Prefer
+`tf.nn.batch_normalization`.
+END
+}
+op {
+ graph_op_name: "QuantizedBiasAdd"
+ endpoint {
+ name: "QuantizedBiasAdd"
+ }
+ summary: "Adds Tensor \'bias\' to Tensor \'input\' for Quantized types."
+ description: <<END
+Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
+END
+}
+op {
+ graph_op_name: "QuantizedConcat"
+ endpoint {
+ name: "QuantizedConcat"
+ }
+ summary: "Concatenates quantized tensors along one dimension."
+}
+op {
+ graph_op_name: "QuantizedConv2D"
+ endpoint {
+ name: "QuantizedConv2D"
+ }
+ summary: "Computes a 2D convolution given quantized 4D input and filter tensors."
+ description: <<END
+The inputs are quantized tensors where the lowest value represents the real
+number of the associated minimum, and the highest represents the maximum.
+This means that you can only interpret the quantized output in the same way, by
+taking the returned minimum and maximum values into account.
+END
+}
+op {
+ graph_op_name: "QuantizedInstanceNorm"
+ endpoint {
+ name: "QuantizedInstanceNorm"
+ }
+ summary: "Quantized Instance normalization."
+}
+op {
+ graph_op_name: "QuantizedMatMul"
+ endpoint {
+ name: "QuantizedMatMul"
+ }
+ summary: "Perform a quantized matrix multiplication of `a` by the matrix `b`."
+ description: <<END
+The inputs must be two-dimensional matrices and the inner dimension of
+`a` (after being transposed if `transpose_a` is non-zero) must match the
+outer dimension of `b` (after being transposed if `transpose_b` is
+non-zero).
+END
+}
+op {
+ graph_op_name: "QuantizedMaxPool"
+ endpoint {
+ name: "QuantizedMaxPool"
+ }
+ summary: "Produces the max pool of the input tensor for quantized types."
+}
+op {
+ graph_op_name: "QuantizedMul"
+ endpoint {
+ name: "QuantizedMul"
+ }
+ summary: "Returns x * y element-wise, working on quantized buffers."
+}
+op {
+ graph_op_name: "QuantizedRelu"
+ endpoint {
+ name: "QuantizedRelu"
+ }
+ summary: "Computes Quantized Rectified Linear: `max(features, 0)`"
+}
+op {
+ graph_op_name: "QuantizedRelu6"
+ endpoint {
+ name: "QuantizedRelu6"
+ }
+ summary: "Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`"
+}
+op {
+ graph_op_name: "QuantizedReluX"
+ endpoint {
+ name: "QuantizedReluX"
+ }
+ summary: "Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`"
+}
+op {
+ graph_op_name: "QuantizedReshape"
+ endpoint {
+ name: "QuantizedReshape"
+ }
+ summary: "Reshapes a quantized tensor as per the Reshape op."
+}
+op {
+ graph_op_name: "QuantizedResizeBilinear"
+ endpoint {
+ name: "QuantizedResizeBilinear"
+ }
+ summary: "Resize quantized `images` to `size` using quantized bilinear interpolation."
+ description: <<END
+Input images and output images must be quantized types.
+END
+}
+op {
+ graph_op_name: "QueueClose"
+ endpoint {
+ name: "QueueClose"
+ }
+ summary: "Closes the given queue."
+ description: <<END
+This operation signals that no more elements will be enqueued in the
+given queue. Subsequent Enqueue(Many) operations will fail.
+Subsequent Dequeue(Many) operations will continue to succeed if
+sufficient elements remain in the queue. Subsequent Dequeue(Many)
+operations that would block will fail immediately.
+END
+}
+op {
+ graph_op_name: "QueueCloseV2"
+ endpoint {
+ name: "QueueCloseV2"
+ }
+ summary: "Closes the given queue."
+ description: <<END
+This operation signals that no more elements will be enqueued in the
+given queue. Subsequent Enqueue(Many) operations will fail.
+Subsequent Dequeue(Many) operations will continue to succeed if
+sufficient elements remain in the queue. Subsequent Dequeue(Many)
+operations that would block will fail immediately.
+END
+}
+op {
+ graph_op_name: "QueueDequeue"
+ endpoint {
+ name: "QueueDequeue"
+ }
+ summary: "Dequeues a tuple of one or more tensors from the given queue."
+ description: <<END
+This operation has k outputs, where k is the number of components
+in the tuples stored in the given queue, and output i is the ith
+component of the dequeued tuple.
+
+N.B. If the queue is empty, this operation will block until an element
+has been dequeued (or 'timeout_ms' elapses, if specified).
+END
+}
+op {
+ graph_op_name: "QueueDequeueMany"
+ endpoint {
+ name: "QueueDequeueMany"
+ }
+ summary: "Dequeues `n` tuples of one or more tensors from the given queue."
+ description: <<END
+If the queue is closed and there are fewer than `n` elements, then an
+OutOfRange error is returned.
+
+This operation concatenates queue-element component tensors along the
+0th dimension to make a single component tensor. All of the components
+in the dequeued tuple will have size `n` in the 0th dimension.
+
+This operation has `k` outputs, where `k` is the number of components in
+the tuples stored in the given queue, and output `i` is the ith
+component of the dequeued tuple.
+
+N.B. If the queue is empty, this operation will block until `n` elements
+have been dequeued (or 'timeout_ms' elapses, if specified).
+END
+}
+op {
+ graph_op_name: "QueueDequeueManyV2"
+ endpoint {
+ name: "QueueDequeueManyV2"
+ }
+ summary: "Dequeues `n` tuples of one or more tensors from the given queue."
+ description: <<END
+If the queue is closed and there are fewer than `n` elements, then an
+OutOfRange error is returned.
+
+This operation concatenates queue-element component tensors along the
+0th dimension to make a single component tensor. All of the components
+in the dequeued tuple will have size `n` in the 0th dimension.
+
+This operation has `k` outputs, where `k` is the number of components in
+the tuples stored in the given queue, and output `i` is the ith
+component of the dequeued tuple.
+
+N.B. If the queue is empty, this operation will block until `n` elements
+have been dequeued (or 'timeout_ms' elapses, if specified).
+END
+}
+op {
+ graph_op_name: "QueueDequeueUpTo"
+ endpoint {
+ name: "QueueDequeueUpTo"
+ }
+ summary: "Dequeues `n` tuples of one or more tensors from the given queue."
+ description: <<END
+This operation is not supported by all queues. If a queue does not support
+DequeueUpTo, then an Unimplemented error is returned.
+
+If the queue is closed and there are more than 0 but fewer than `n`
+elements remaining, then instead of returning an OutOfRange error like
+QueueDequeueMany, fewer than `n` elements are returned immediately. If
+the queue is closed and there are 0 elements left in the queue, then
+an OutOfRange error is returned just like in QueueDequeueMany.
+Otherwise the behavior is identical to QueueDequeueMany:
+
+This operation concatenates queue-element component tensors along the
+0th dimension to make a single component tensor. All of the components
+in the dequeued tuple will have size `n` in the 0th dimension.
+
+This operation has k outputs, where `k` is the number of components in
+the tuples stored in the given queue, and output `i` is the ith
+component of the dequeued tuple.
+END
+}
+op {
+ graph_op_name: "QueueDequeueUpToV2"
+ endpoint {
+ name: "QueueDequeueUpToV2"
+ }
+ summary: "Dequeues `n` tuples of one or more tensors from the given queue."
+ description: <<END
+This operation is not supported by all queues. If a queue does not support
+DequeueUpTo, then an Unimplemented error is returned.
+
+If the queue is closed and there are more than 0 but fewer than `n`
+elements remaining, then instead of returning an OutOfRange error like
+QueueDequeueMany, fewer than `n` elements are returned immediately. If
+the queue is closed and there are 0 elements left in the queue, then
+an OutOfRange error is returned just like in QueueDequeueMany.
+Otherwise the behavior is identical to QueueDequeueMany:
+
+This operation concatenates queue-element component tensors along the
+0th dimension to make a single component tensor. All of the components
+in the dequeued tuple will have size n in the 0th dimension.
+
+This operation has `k` outputs, where `k` is the number of components in
+the tuples stored in the given queue, and output `i` is the ith
+component of the dequeued tuple.
+END
+}
+op {
+ graph_op_name: "QueueDequeueV2"
+ endpoint {
+ name: "QueueDequeueV2"
+ }
+ summary: "Dequeues a tuple of one or more tensors from the given queue."
+ description: <<END
+This operation has k outputs, where k is the number of components
+in the tuples stored in the given queue, and output i is the ith
+component of the dequeued tuple.
+
+N.B. If the queue is empty, this operation will block until an element
+has been dequeued (or 'timeout_ms' elapses, if specified).
+END
+}
+op {
+ graph_op_name: "QueueEnqueue"
+ endpoint {
+ name: "QueueEnqueue"
+ }
+ summary: "Enqueues a tuple of one or more tensors in the given queue."
+ description: <<END
+The components input has k elements, which correspond to the components of
+tuples stored in the given queue.
+
+N.B. If the queue is full, this operation will block until the given
+element has been enqueued (or 'timeout_ms' elapses, if specified).
+END
+}
+op {
+ graph_op_name: "QueueEnqueueMany"
+ endpoint {
+ name: "QueueEnqueueMany"
+ }
+ summary: "Enqueues zero or more tuples of one or more tensors in the given queue."
+ description: <<END
+This operation slices each component tensor along the 0th dimension to
+make multiple queue elements. All of the tuple components must have the
+same size in the 0th dimension.
+
+The components input has k elements, which correspond to the components of
+tuples stored in the given queue.
+
+N.B. If the queue is full, this operation will block until the given
+elements have been enqueued (or 'timeout_ms' elapses, if specified).
+END
+}
+op {
+ graph_op_name: "QueueEnqueueManyV2"
+ endpoint {
+ name: "QueueEnqueueManyV2"
+ }
+ summary: "Enqueues zero or more tuples of one or more tensors in the given queue."
+ description: <<END
+This operation slices each component tensor along the 0th dimension to
+make multiple queue elements. All of the tuple components must have the
+same size in the 0th dimension.
+
+The components input has k elements, which correspond to the components of
+tuples stored in the given queue.
+
+N.B. If the queue is full, this operation will block until the given
+elements have been enqueued (or 'timeout_ms' elapses, if specified).
+END
+}
+op {
+ graph_op_name: "QueueEnqueueV2"
+ endpoint {
+ name: "QueueEnqueueV2"
+ }
+ summary: "Enqueues a tuple of one or more tensors in the given queue."
+ description: <<END
+The components input has k elements, which correspond to the components of
+tuples stored in the given queue.
+
+N.B. If the queue is full, this operation will block until the given
+element has been enqueued (or 'timeout_ms' elapses, if specified).
+END
+}
+op {
+ graph_op_name: "QueueIsClosed"
+ endpoint {
+ name: "QueueIsClosed"
+ }
+ summary: "Returns true if queue is closed."
+ description: <<END
+This operation returns true if the queue is closed and false if the queue
+is open.
+END
+}
+op {
+ graph_op_name: "QueueIsClosedV2"
+ endpoint {
+ name: "QueueIsClosedV2"
+ }
+ summary: "Returns true if queue is closed."
+ description: <<END
+This operation returns true if the queue is closed and false if the queue
+is open.
+END
+}
+op {
+ graph_op_name: "QueueSize"
+ endpoint {
+ name: "QueueSize"
+ }
+ summary: "Computes the number of elements in the given queue."
+}
+op {
+ graph_op_name: "QueueSizeV2"
+ endpoint {
+ name: "QueueSizeV2"
+ }
+ summary: "Computes the number of elements in the given queue."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_R.pbtxt b/tensorflow/core/api_def/base_api/api_def_R.pbtxt
new file mode 100644
index 0000000000..4c398c9771
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_R.pbtxt
@@ -0,0 +1,1392 @@
+op {
+ graph_op_name: "RFFT"
+ endpoint {
+ name: "RFFT"
+ }
+ summary: "Real-valued fast Fourier transform."
+ description: <<END
+Computes the 1-dimensional discrete Fourier transform of a real-valued signal
+over the inner-most dimension of `input`.
+
+Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
+`fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
+followed by the `fft_length / 2` positive-frequency terms.
+
+Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
+corresponding dimension of `input`, the dimension is cropped. If it is larger,
+the dimension is padded with zeros.
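+
+For example (a minimal sketch, assuming the `tf.spectral.rfft` Python wrapper):
+
+```
+# A length-4 real signal yields fft_length / 2 + 1 = 3 complex terms.
+tf.spectral.rfft([1., 1., 1., 1.], fft_length=[4]) ==> [4+0j, 0+0j, 0+0j]
+```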
+END
+}
+op {
+ graph_op_name: "RFFT2D"
+ endpoint {
+ name: "RFFT2D"
+ }
+ summary: "2D real-valued fast Fourier transform."
+ description: <<END
+Computes the 2-dimensional discrete Fourier transform of a real-valued signal
+over the inner-most 2 dimensions of `input`.
+
+Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
+`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
+of `output`: the zero-frequency term, followed by the `fft_length / 2`
+positive-frequency terms.
+
+Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
+corresponding dimension of `input`, the dimension is cropped. If it is larger,
+the dimension is padded with zeros.
+END
+}
+op {
+ graph_op_name: "RFFT3D"
+ endpoint {
+ name: "RFFT3D"
+ }
+ summary: "3D real-valued fast Fourier transform."
+ description: <<END
+Computes the 3-dimensional discrete Fourier transform of a real-valued signal
+over the inner-most 3 dimensions of `input`.
+
+Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
+`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
+of `output`: the zero-frequency term, followed by the `fft_length / 2`
+positive-frequency terms.
+
+Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
+corresponding dimension of `input`, the dimension is cropped. If it is larger,
+the dimension is padded with zeros.
+END
+}
+op {
+ graph_op_name: "RGBToHSV"
+ endpoint {
+ name: "RGBToHSV"
+ }
+ summary: "Converts one or more images from RGB to HSV."
+ description: <<END
+Outputs a tensor of the same shape as the `images` tensor, containing the HSV
+value of the pixels. The output is only well defined if the value in `images`
+are in `[0,1]`.
+
+`output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
+`output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
+corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
+END
+}
+op {
+ graph_op_name: "RandomCrop"
+ endpoint {
+ name: "RandomCrop"
+ }
+ summary: "Randomly crop `image`."
+ description: <<END
+`size` is a 1-D int64 tensor with 2 elements representing the crop height and
+width. The values must be non-negative.
+
+This Op picks a random location in `image` and crops a `height` by `width`
+rectangle from that location. The random location is picked so the cropped
+area will fit inside the original image.
+END
+}
+op {
+ graph_op_name: "RandomGamma"
+ endpoint {
+ name: "RandomGamma"
+ }
+ summary: "Outputs random values from the Gamma distribution(s) described by alpha."
+ description: <<END
+This op uses the algorithm by Marsaglia et al. to acquire samples via
+transformation-rejection from pairs of uniform and normal random variables.
+See http://dl.acm.org/citation.cfm?id=358414
+END
+}
+op {
+ graph_op_name: "RandomPoisson"
+ endpoint {
+ name: "RandomPoisson"
+ }
+ summary: "Outputs random values from the Poisson distribution(s) described by rate."
+ description: <<END
+This op uses two algorithms, depending on rate. If rate >= 10, then
+the algorithm by Hormann is used to acquire samples via
+transformation-rejection.
+See http://www.sciencedirect.com/science/article/pii/0167668793909974.
+
+Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
+random variables.
+See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
+Programming, Volume 2. Addison Wesley
+END
+}
+op {
+ graph_op_name: "RandomPoissonV2"
+ endpoint {
+ name: "RandomPoissonV2"
+ }
+ summary: "Outputs random values from the Poisson distribution(s) described by rate."
+ description: <<END
+This op uses two algorithms, depending on rate. If rate >= 10, then
+the algorithm by Hormann is used to acquire samples via
+transformation-rejection.
+See http://www.sciencedirect.com/science/article/pii/0167668793909974.
+
+Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
+random variables.
+See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
+Programming, Volume 2. Addison Wesley
+END
+}
+op {
+ graph_op_name: "RandomShuffle"
+ endpoint {
+ name: "RandomShuffle"
+ }
+ summary: "Randomly shuffles a tensor along its first dimension."
+ description: <<END
+The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
+to one and only one `output[i]`. For example, a mapping that might occur for a
+3x2 tensor is:
+
+```
+[[1, 2], [[5, 6],
+ [3, 4], ==> [1, 2],
+ [5, 6]] [3, 4]]
+```
+END
+}
+op {
+ graph_op_name: "RandomShuffleQueue"
+ endpoint {
+ name: "RandomShuffleQueue"
+ }
+ summary: "A queue that randomizes the order of elements."
+}
+op {
+ graph_op_name: "RandomShuffleQueueV2"
+ endpoint {
+ name: "RandomShuffleQueueV2"
+ }
+ summary: "A queue that randomizes the order of elements."
+}
+op {
+ graph_op_name: "RandomStandardNormal"
+ endpoint {
+ name: "RandomStandardNormal"
+ }
+ summary: "Outputs random values from a normal distribution."
+ description: <<END
+The generated values will have mean 0 and standard deviation 1.
+END
+}
+op {
+ graph_op_name: "RandomUniform"
+ endpoint {
+ name: "RandomUniform"
+ }
+ summary: "Outputs random values from a uniform distribution."
+ description: <<END
+The generated values follow a uniform distribution in the range `[0, 1)`. The
+lower bound 0 is included in the range, while the upper bound 1 is excluded.
+END
+}
+op {
+ graph_op_name: "RandomUniformInt"
+ endpoint {
+ name: "RandomUniformInt"
+ }
+ summary: "Outputs random integers from a uniform distribution."
+ description: <<END
+The generated values are uniform integers in the range `[minval, maxval)`.
+The lower bound `minval` is included in the range, while the upper bound
+`maxval` is excluded.
+
+The random integers are slightly biased unless `maxval - minval` is an exact
+power of two. The bias is small for values of `maxval - minval` significantly
+smaller than the range of the output (either `2^32` or `2^64`).
+END
+}
+op {
+ graph_op_name: "Range"
+ endpoint {
+ name: "Range"
+ }
+ summary: "Creates a sequence of numbers."
+ description: <<END
+This operation creates a sequence of numbers that begins at `start` and
+extends by increments of `delta` up to but not including `limit`.
+
+For example:
+
+```
+# 'start' is 3
+# 'limit' is 18
+# 'delta' is 3
+tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
+```
+END
+}
+op {
+ graph_op_name: "RangeDataset"
+ endpoint {
+ name: "RangeDataset"
+ }
+ summary: "Creates a dataset with a range of values. Corresponds to python\'s xrange."
+}
+op {
+ graph_op_name: "Rank"
+ endpoint {
+ name: "Rank"
+ }
+ summary: "Returns the rank of a tensor."
+ description: <<END
+This operation returns an integer representing the rank of `input`.
+
+For example:
+
+```
+# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+# shape of tensor 't' is [2, 2, 3]
+rank(t) ==> 3
+```
+
+**Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
+of a tensor is the number of indices required to uniquely select each element
+of the tensor. Rank is also known as "order", "degree", or "ndims."
+END
+}
+op {
+ graph_op_name: "ReadFile"
+ endpoint {
+ name: "ReadFile"
+ }
+ summary: "Reads and outputs the entire contents of the input filename."
+}
+op {
+ graph_op_name: "ReaderNumRecordsProduced"
+ endpoint {
+ name: "ReaderNumRecordsProduced"
+ }
+ summary: "Returns the number of records this Reader has produced."
+ description: <<END
+This is the same as the number of ReaderRead executions that have
+succeeded.
+END
+}
+op {
+ graph_op_name: "ReaderNumRecordsProducedV2"
+ endpoint {
+ name: "ReaderNumRecordsProducedV2"
+ }
+ summary: "Returns the number of records this Reader has produced."
+ description: <<END
+This is the same as the number of ReaderRead executions that have
+succeeded.
+END
+}
+op {
+ graph_op_name: "ReaderNumWorkUnitsCompleted"
+ endpoint {
+ name: "ReaderNumWorkUnitsCompleted"
+ }
+ summary: "Returns the number of work units this Reader has finished processing."
+}
+op {
+ graph_op_name: "ReaderNumWorkUnitsCompletedV2"
+ endpoint {
+ name: "ReaderNumWorkUnitsCompletedV2"
+ }
+ summary: "Returns the number of work units this Reader has finished processing."
+}
+op {
+ graph_op_name: "ReaderRead"
+ endpoint {
+ name: "ReaderRead"
+ }
+ summary: "Returns the next record (key, value pair) produced by a Reader."
+ description: <<END
+Will dequeue from the input queue if necessary (e.g. when the
+Reader needs to start reading from a new file since it has finished
+with the previous file).
+END
+}
+op {
+ graph_op_name: "ReaderReadUpTo"
+ endpoint {
+ name: "ReaderReadUpTo"
+ }
+ summary: "Returns up to `num_records` (key, value) pairs produced by a Reader."
+ description: <<END
+Will dequeue from the input queue if necessary (e.g. when the
+Reader needs to start reading from a new file since it has finished
+with the previous file).
+It may return less than `num_records` even before the last batch.
+END
+}
+op {
+ graph_op_name: "ReaderReadUpToV2"
+ endpoint {
+ name: "ReaderReadUpToV2"
+ }
+ summary: "Returns up to `num_records` (key, value) pairs produced by a Reader."
+ description: <<END
+Will dequeue from the input queue if necessary (e.g. when the
+Reader needs to start reading from a new file since it has finished
+with the previous file).
+It may return less than `num_records` even before the last batch.
+END
+}
+op {
+ graph_op_name: "ReaderReadV2"
+ endpoint {
+ name: "ReaderReadV2"
+ }
+ summary: "Returns the next record (key, value pair) produced by a Reader."
+ description: <<END
+Will dequeue from the input queue if necessary (e.g. when the
+Reader needs to start reading from a new file since it has finished
+with the previous file).
+END
+}
+op {
+ graph_op_name: "ReaderReset"
+ endpoint {
+ name: "ReaderReset"
+ }
+ summary: "Restore a Reader to its initial clean state."
+}
+op {
+ graph_op_name: "ReaderResetV2"
+ endpoint {
+ name: "ReaderResetV2"
+ }
+ summary: "Restore a Reader to its initial clean state."
+}
+op {
+ graph_op_name: "ReaderRestoreState"
+ endpoint {
+ name: "ReaderRestoreState"
+ }
+ summary: "Restore a reader to a previously saved state."
+ description: <<END
+Not all Readers support being restored, so this can produce an
+Unimplemented error.
+END
+}
+op {
+ graph_op_name: "ReaderRestoreStateV2"
+ endpoint {
+ name: "ReaderRestoreStateV2"
+ }
+ summary: "Restore a reader to a previously saved state."
+ description: <<END
+Not all Readers support being restored, so this can produce an
+Unimplemented error.
+END
+}
+op {
+ graph_op_name: "ReaderSerializeState"
+ endpoint {
+ name: "ReaderSerializeState"
+ }
+ summary: "Produce a string tensor that encodes the state of a Reader."
+ description: <<END
+Not all Readers support being serialized, so this can produce an
+Unimplemented error.
+END
+}
+op {
+ graph_op_name: "ReaderSerializeStateV2"
+ endpoint {
+ name: "ReaderSerializeStateV2"
+ }
+ summary: "Produce a string tensor that encodes the state of a Reader."
+ description: <<END
+Not all Readers support being serialized, so this can produce an
+Unimplemented error.
+END
+}
+op {
+ graph_op_name: "Real"
+ endpoint {
+ name: "Real"
+ }
+ summary: "Returns the real part of a complex number."
+ description: <<END
+Given a tensor `input` of complex numbers, this operation returns a tensor of
+type `float` that is the real part of each element in `input`. All elements in
+`input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
+part returned by this operation and *b* is the imaginary part.
+
+For example:
+
+```
+# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+tf.real(input) ==> [-2.25, 3.25]
+```
+END
+}
+op {
+ graph_op_name: "RealDiv"
+ endpoint {
+ name: "RealDiv"
+ }
+ summary: "Returns x / y element-wise for real types."
+ description: <<END
+If `x` and `y` are reals, this will return the floating-point division.
+
+*NOTE*: `Div` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "Reciprocal"
+ endpoint {
+ name: "Reciprocal"
+ }
+ summary: "Computes the reciprocal of x element-wise."
+ description: <<END
+I.e., \\(y = 1 / x\\).
+END
+}
+op {
+ graph_op_name: "ReciprocalGrad"
+ endpoint {
+ name: "ReciprocalGrad"
+ }
+ summary: "Computes the gradient for the inverse of `x` wrt its input."
+ description: <<END
+Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
+is the corresponding input gradient.
+END
+}
+op {
+ graph_op_name: "RecordInput"
+ endpoint {
+ name: "RecordInput"
+ }
+ summary: "Emits randomized records."
+}
+op {
+ graph_op_name: "ReduceJoin"
+ endpoint {
+ name: "ReduceJoin"
+ }
+ summary: "Joins a string Tensor across the given dimensions."
+ description: <<END
+Computes the string join across dimensions in the given string Tensor of shape
+`[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input
+strings with the given separator (default: empty string). Negative indices are
+counted backwards from the end, with `-1` being equivalent to `n - 1`.
+
+For example:
+
+```python
+# tensor `a` is [["a", "b"], ["c", "d"]]
+tf.reduce_join(a, 0) ==> ["ac", "bd"]
+tf.reduce_join(a, 1) ==> ["ab", "cd"]
+tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
+tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
+tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
+tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
+tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
+tf.reduce_join(a, [0, 1]) ==> ["acbd"]
+tf.reduce_join(a, [1, 0]) ==> ["abcd"]
+tf.reduce_join(a, []) ==> ["abcd"]
+```
+END
+}
+op {
+ graph_op_name: "RefEnter"
+ endpoint {
+ name: "RefEnter"
+ }
+ summary: "Creates or finds a child frame, and makes `data` available to the child frame."
+ description: <<END
+The unique `frame_name` is used by the `Executor` to identify frames. If
+`is_constant` is true, `output` is a constant in the child frame; otherwise
+it may be changed in the child frame. At most `parallel_iterations` iterations
+are run in parallel in the child frame.
+END
+}
+op {
+ graph_op_name: "RefExit"
+ endpoint {
+ name: "RefExit"
+ }
+ summary: "Exits the current frame to its parent frame."
+ description: <<END
+Exit makes its input `data` available to the parent frame.
+END
+}
+op {
+ graph_op_name: "RefIdentity"
+ endpoint {
+ name: "RefIdentity"
+ }
+ summary: "Return the same ref tensor as the input ref tensor."
+}
+op {
+ graph_op_name: "RefMerge"
+ endpoint {
+ name: "RefMerge"
+ }
+ summary: "Forwards the value of an available tensor from `inputs` to `output`."
+ description: <<END
+`Merge` waits for at least one of the tensors in `inputs` to become available.
+It is usually combined with `Switch` to implement branching.
+
+`Merge` forwards the first tensor for become available to `output`, and sets
+`value_index` to its index in `inputs`.
+END
+}
+op {
+ graph_op_name: "RefNextIteration"
+ endpoint {
+ name: "RefNextIteration"
+ }
+ summary: "Makes its input available to the next iteration."
+}
+op {
+ graph_op_name: "RefSelect"
+ endpoint {
+ name: "RefSelect"
+ }
+ summary: "Forwards the `index`th element of `inputs` to `output`."
+}
+op {
+ graph_op_name: "RefSwitch"
+ endpoint {
+ name: "RefSwitch"
+ }
+ summary: "Forwards the ref tensor `data` to the output port determined by `pred`."
+ description: <<END
+If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
+the data goes to `output_false`.
+
+See also `Switch` and `Merge`.
+END
+}
+op {
+ graph_op_name: "Relu"
+ endpoint {
+ name: "Relu"
+ }
+ summary: "Computes rectified linear: `max(features, 0)`."
+}
+op {
+ graph_op_name: "Relu6"
+ endpoint {
+ name: "Relu6"
+ }
+ summary: "Computes rectified linear 6: `min(max(features, 0), 6)`."
+}
+op {
+ graph_op_name: "Relu6Grad"
+ endpoint {
+ name: "Relu6Grad"
+ }
+ summary: "Computes rectified linear 6 gradients for a Relu6 operation."
+}
+op {
+ graph_op_name: "ReluGrad"
+ endpoint {
+ name: "ReluGrad"
+ }
+ summary: "Computes rectified linear gradients for a Relu operation."
+}
+op {
+ graph_op_name: "RemoteCall"
+ endpoint {
+ name: "RemoteCall"
+ }
+ summary: "Runs function `f` on a remote device indicated by `target`."
+}
+op {
+ graph_op_name: "RemoteFusedGraphExecute"
+ endpoint {
+ name: "RemoteFusedGraphExecute"
+ }
+ summary: "Execute a sub graph on a remote processor."
+ description: <<END
+The graph specifications (such as the graph itself, input tensors and output names)
+are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo
+as serialized_remote_fused_graph_execute_info.
+The specifications will be passed to a dedicated registered
+remote fused graph executor. The executor will send the graph specifications
+to a remote processor and execute that graph. The execution results
+will be passed to consumer nodes as outputs of this node.
+END
+}
+op {
+ graph_op_name: "RepeatDataset"
+ endpoint {
+ name: "RepeatDataset"
+ }
+ summary: "Creates a dataset that emits the outputs of `input_dataset` `count` times."
+}
+op {
+ graph_op_name: "RequantizationRange"
+ endpoint {
+ name: "RequantizationRange"
+ }
+ summary: "Given a quantized tensor described by (input, input_min, input_max), outputs a"
+ description: <<END
+range that covers the actual values present in that tensor. This op is
+typically used to produce the requested_output_min and requested_output_max for
+Requantize.
+END
+}
+op {
+ graph_op_name: "Requantize"
+ endpoint {
+ name: "Requantize"
+ }
+ summary: "Convert the quantized \'input\' tensor into a lower-precision \'output\', using the"
+ description: <<END
+output range specified with 'requested_output_min' and 'requested_output_max'.
+
+[input_min, input_max] are scalar floats that specify the range for the float
+interpretation of the 'input' data. For example, if input_min is -1.0f and
+input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
+value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
+END
+}
+op {
+ graph_op_name: "Reshape"
+ endpoint {
+ name: "Reshape"
+ }
+ summary: "Reshapes a tensor."
+ description: <<END
+Given `tensor`, this operation returns a tensor that has the same values
+as `tensor` with shape `shape`.
+
+If one component of `shape` is the special value -1, the size of that dimension
+is computed so that the total size remains constant. In particular, a `shape`
+of `[-1]` flattens into 1-D. At most one component of `shape` can be -1.
+
+If `shape` is 1-D or higher, then the operation returns a tensor with shape
+`shape` filled with the values of `tensor`. In this case, the number of elements
+implied by `shape` must be the same as the number of elements in `tensor`.
+
+For example:
+
+```
+# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
+# tensor 't' has shape [9]
+reshape(t, [3, 3]) ==> [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+
+# tensor 't' is [[[1, 1], [2, 2]],
+# [[3, 3], [4, 4]]]
+# tensor 't' has shape [2, 2, 2]
+reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
+ [3, 3, 4, 4]]
+
+# tensor 't' is [[[1, 1, 1],
+# [2, 2, 2]],
+# [[3, 3, 3],
+# [4, 4, 4]],
+# [[5, 5, 5],
+# [6, 6, 6]]]
+# tensor 't' has shape [3, 2, 3]
+# pass '[-1]' to flatten 't'
+reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
+
+# -1 can also be used to infer the shape
+
+# -1 is inferred to be 9:
+reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
+ [4, 4, 4, 5, 5, 5, 6, 6, 6]]
+# -1 is inferred to be 2:
+reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
+ [4, 4, 4, 5, 5, 5, 6, 6, 6]]
+# -1 is inferred to be 3:
+reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
+ [2, 2, 2],
+ [3, 3, 3]],
+ [[4, 4, 4],
+ [5, 5, 5],
+ [6, 6, 6]]]
+
+# tensor 't' is [7]
+# shape `[]` reshapes to a scalar
+reshape(t, []) ==> 7
+```
+END
+}
+op {
+ graph_op_name: "ResizeArea"
+ endpoint {
+ name: "ResizeArea"
+ }
+ summary: "Resize `images` to `size` using area interpolation."
+ description: <<END
+Input images can be of different types but output images are always float.
+
+Each output pixel is computed by first transforming the pixel's footprint into
+the input tensor and then averaging the pixels that intersect the footprint. An
+input pixel's contribution to the average is weighted by the fraction of its
+area that intersects the footprint. This is the same as OpenCV's INTER_AREA.
+END
+}
+op {
+ graph_op_name: "ResizeBicubic"
+ endpoint {
+ name: "ResizeBicubic"
+ }
+ summary: "Resize `images` to `size` using bicubic interpolation."
+ description: <<END
+Input images can be of different types but output images are always float.
+END
+}
+op {
+ graph_op_name: "ResizeBicubicGrad"
+ endpoint {
+ name: "ResizeBicubicGrad"
+ }
+ summary: "Computes the gradient of bicubic interpolation."
+}
+op {
+ graph_op_name: "ResizeBilinear"
+ endpoint {
+ name: "ResizeBilinear"
+ }
+ summary: "Resize `images` to `size` using bilinear interpolation."
+ description: <<END
+Input images can be of different types but output images are always float.
+END
+}
+op {
+ graph_op_name: "ResizeBilinearGrad"
+ endpoint {
+ name: "ResizeBilinearGrad"
+ }
+ summary: "Computes the gradient of bilinear interpolation."
+}
+op {
+ graph_op_name: "ResizeNearestNeighbor"
+ endpoint {
+ name: "ResizeNearestNeighbor"
+ }
+ summary: "Resize `images` to `size` using nearest neighbor interpolation."
+}
+op {
+ graph_op_name: "ResizeNearestNeighborGrad"
+ endpoint {
+ name: "ResizeNearestNeighborGrad"
+ }
+ summary: "Computes the gradient of nearest neighbor interpolation."
+}
+op {
+ graph_op_name: "ResourceApplyAdadelta"
+ endpoint {
+ name: "ResourceApplyAdadelta"
+ }
+ summary: "Update \'*var\' according to the adadelta scheme."
+ description: <<END
+accum = rho * accum + (1 - rho) * grad.square();
+update = (update_accum + epsilon).sqrt() * (accum + epsilon).rsqrt() * grad;
+update_accum = rho * update_accum + (1 - rho) * update.square();
+var -= update;
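+
+A minimal NumPy sketch of the same update, transcribed directly from the
+pseudocode above (illustrative only; the op applies it in place to the
+resource-backed variable):
+
+```python
+import numpy as np
+
+def adadelta_step(var, accum, update_accum, grad, rho, epsilon):
+  # Direct transcription of the update rules above.
+  accum = rho * accum + (1 - rho) * grad**2
+  update = np.sqrt(update_accum + epsilon) / np.sqrt(accum + epsilon) * grad
+  update_accum = rho * update_accum + (1 - rho) * update**2
+  var = var - update
+  return var, accum, update_accum
+```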
+END
+}
+op {
+ graph_op_name: "ResourceApplyAdagrad"
+ endpoint {
+ name: "ResourceApplyAdagrad"
+ }
+ summary: "Update \'*var\' according to the adagrad scheme."
+ description: <<END
+accum += grad * grad
+var -= lr * grad * (1 / sqrt(accum))
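+
+A minimal NumPy sketch of the same update (illustrative only; the op mutates
+the resource-backed variable in place):
+
+```python
+import numpy as np
+
+def adagrad_step(var, accum, grad, lr):
+  # Mirrors the pseudocode above.
+  accum = accum + grad * grad
+  var = var - lr * grad / np.sqrt(accum)
+  return var, accum
+```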
+END
+}
+op {
+ graph_op_name: "ResourceApplyAdagradDA"
+ endpoint {
+ name: "ResourceApplyAdagradDA"
+ }
+ summary: "Update \'*var\' according to the proximal adagrad scheme."
+}
+op {
+ graph_op_name: "ResourceApplyAdam"
+ endpoint {
+ name: "ResourceApplyAdam"
+ }
+ summary: "Update \'*var\' according to the Adam algorithm."
+ description: <<END
+lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
+m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
+v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
+variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
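+
+A minimal NumPy sketch of the same update (illustrative only):
+
+```python
+import numpy as np
+
+def adam_step(var, m, v, g, t, lr, beta1=0.9, beta2=0.999, epsilon=1e-8):
+  # Direct transcription of the update rules above.
+  lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)
+  m = beta1 * m + (1 - beta1) * g
+  v = beta2 * v + (1 - beta2) * g * g
+  var = var - lr_t * m / (np.sqrt(v) + epsilon)
+  return var, m, v
+```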
+END
+}
+op {
+ graph_op_name: "ResourceApplyCenteredRMSProp"
+ endpoint {
+ name: "ResourceApplyCenteredRMSProp"
+ }
+ summary: "Update \'*var\' according to the centered RMSProp algorithm."
+ description: <<END
+The centered RMSProp algorithm uses an estimate of the centered second moment
+(i.e., the variance) for normalization, as opposed to regular RMSProp, which
+uses the (uncentered) second moment. This often helps with training, but is
+slightly more expensive in terms of computation and memory.
+
+Note that in the dense implementation of this algorithm, mg, ms, and mom will
+update even if the grad is zero, but in this sparse implementation, mg, ms,
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+mean_grad = decay * mean_grad + (1-decay) * gradient
+
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
+
+mg <- rho * mg_{t-1} + (1-rho) * grad
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
+var <- var - mom
+END
+}
+op {
+ graph_op_name: "ResourceApplyFtrl"
+ endpoint {
+ name: "ResourceApplyFtrl"
+ }
+ summary: "Update \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+accum_new = accum + grad * grad
+linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
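+
+A minimal NumPy sketch that transcribes the pseudocode above element-wise
+(illustrative only):
+
+```python
+import numpy as np
+
+def ftrl_step(var, accum, linear, grad, lr, l1, l2, lr_power):
+  # Element-wise transcription of the update rules above.
+  accum_new = accum + grad * grad
+  linear = linear + grad - (accum_new**(-lr_power) - accum**(-lr_power)) / lr * var
+  quadratic = 1.0 / (accum_new**lr_power * lr) + 2 * l2
+  var = np.where(np.abs(linear) > l1,
+                 (np.sign(linear) * l1 - linear) / quadratic, 0.0)
+  return var, accum_new, linear
+```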
+END
+}
+op {
+ graph_op_name: "ResourceApplyFtrlV2"
+ endpoint {
+ name: "ResourceApplyFtrlV2"
+ }
+ summary: "Update \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+grad_with_shrinkage = grad + 2 * l2_shrinkage * var
+accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
+linear += grad_with_shrinkage +
+ (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
+op {
+ graph_op_name: "ResourceApplyGradientDescent"
+ endpoint {
+ name: "ResourceApplyGradientDescent"
+ }
+ summary: "Update \'*var\' by subtracting \'alpha\' * \'delta\' from it."
+}
+op {
+ graph_op_name: "ResourceApplyMomentum"
+ endpoint {
+ name: "ResourceApplyMomentum"
+ }
+ summary: "Update \'*var\' according to the momentum scheme. Set use_nesterov = True if you"
+ description: <<END
+want to use Nesterov momentum.
+
+accum = accum * momentum + grad
+var -= lr * accum
+END
+}
+op {
+ graph_op_name: "ResourceApplyProximalAdagrad"
+ endpoint {
+ name: "ResourceApplyProximalAdagrad"
+ }
+ summary: "Update \'*var\' and \'*accum\' according to FOBOS with Adagrad learning rate."
+ description: <<END
+accum += grad * grad
+prox_v = var - lr * grad * (1 / sqrt(accum))
+var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
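+
+A minimal NumPy sketch of the same update (illustrative only):
+
+```python
+import numpy as np
+
+def proximal_adagrad_step(var, accum, grad, lr, l1, l2):
+  # Transcribes the FOBOS-with-Adagrad rules above.
+  accum = accum + grad * grad
+  prox_v = var - lr * grad / np.sqrt(accum)
+  var = np.sign(prox_v) / (1 + lr * l2) * np.maximum(np.abs(prox_v) - lr * l1, 0)
+  return var, accum
+```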
+END
+}
+op {
+ graph_op_name: "ResourceApplyProximalGradientDescent"
+ endpoint {
+ name: "ResourceApplyProximalGradientDescent"
+ }
+ summary: "Update \'*var\' as FOBOS algorithm with fixed learning rate."
+ description: <<END
+prox_v = var - alpha * delta
+var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
+END
+}
+op {
+ graph_op_name: "ResourceApplyRMSProp"
+ endpoint {
+ name: "ResourceApplyRMSProp"
+ }
+ summary: "Update \'*var\' according to the RMSProp algorithm."
+ description: <<END
+Note that in the dense implementation of this algorithm, ms and mom will
+update even if the grad is zero, but in this sparse implementation, ms
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
+
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+var <- var - mom
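+
+A minimal NumPy sketch of the dense update (illustrative only):
+
+```python
+import numpy as np
+
+def rmsprop_step(var, ms, mom, grad, lr, rho, momentum, epsilon):
+  # Mirrors the update rules above.
+  ms = rho * ms + (1 - rho) * grad * grad
+  mom = momentum * mom + lr * grad / np.sqrt(ms + epsilon)
+  var = var - mom
+  return var, ms, mom
+```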
+END
+}
+op {
+ graph_op_name: "ResourceSparseApplyAdadelta"
+ endpoint {
+ name: "ResourceSparseApplyAdadelta"
+ }
+ summary: "var: Should be from a Variable()."
+}
+op {
+ graph_op_name: "ResourceSparseApplyAdagrad"
+ endpoint {
+ name: "ResourceSparseApplyAdagrad"
+ }
+ summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme."
+ description: <<END
+That is, for the rows for which we have grad, we update var and accum as follows:
+accum += grad * grad
+var -= lr * grad * (1 / sqrt(accum))
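+
+A minimal NumPy sketch of the row-wise update (illustrative only; `indices`
+selects the rows that have gradients):
+
+```python
+import numpy as np
+
+def sparse_adagrad_step(var, accum, grad_rows, indices, lr):
+  # Only the listed rows are touched; all other rows are left unchanged.
+  for g, i in zip(grad_rows, indices):
+    accum[i] += g * g
+    var[i] -= lr * g / np.sqrt(accum[i])
+  return var, accum
+```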
+END
+}
+op {
+ graph_op_name: "ResourceSparseApplyAdagradDA"
+ endpoint {
+ name: "ResourceSparseApplyAdagradDA"
+ }
+ summary: "Update entries in \'*var\' and \'*accum\' according to the proximal adagrad scheme."
+}
+op {
+ graph_op_name: "ResourceSparseApplyCenteredRMSProp"
+ endpoint {
+ name: "ResourceSparseApplyCenteredRMSProp"
+ }
+ summary: "Update \'*var\' according to the centered RMSProp algorithm."
+ description: <<END
+The centered RMSProp algorithm uses an estimate of the centered second moment
+(i.e., the variance) for normalization, as opposed to regular RMSProp, which
+uses the (uncentered) second moment. This often helps with training, but is
+slightly more expensive in terms of computation and memory.
+
+Note that in the dense implementation of this algorithm, mg, ms, and mom will
+update even if the grad is zero, but in this sparse implementation, mg, ms,
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+mean_grad = decay * mean_grad + (1-decay) * gradient
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
+
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+var <- var - mom
+END
+}
+op {
+ graph_op_name: "ResourceSparseApplyFtrl"
+ endpoint {
+ name: "ResourceSparseApplyFtrl"
+ }
+ summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+That is, for the rows for which we have grad, we update var, accum and linear as follows:
+accum_new = accum + grad * grad
+linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
+op {
+ graph_op_name: "ResourceSparseApplyFtrlV2"
+ endpoint {
+ name: "ResourceSparseApplyFtrlV2"
+ }
+ summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+That is, for the rows for which we have grad, we update var, accum and linear as follows:
+grad_with_shrinkage = grad + 2 * l2_shrinkage * var
+accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
+linear += grad_with_shrinkage +
+ (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
+op {
+ graph_op_name: "ResourceSparseApplyMomentum"
+ endpoint {
+ name: "ResourceSparseApplyMomentum"
+ }
+ summary: "Update relevant entries in \'*var\' and \'*accum\' according to the momentum scheme."
+ description: <<END
+Set use_nesterov = True if you want to use Nesterov momentum.
+
+That is, for the rows for which we have grad, we update var and accum as follows:
+
+accum = accum * momentum + grad
+var -= lr * accum
+END
+}
+op {
+ graph_op_name: "ResourceSparseApplyProximalAdagrad"
+ endpoint {
+ name: "ResourceSparseApplyProximalAdagrad"
+ }
+ summary: "Sparse update entries in \'*var\' and \'*accum\' according to FOBOS algorithm."
+ description: <<END
+That is, for the rows for which we have grad, we update var and accum as follows:
+accum += grad * grad
+prox_v = var
+prox_v -= lr * grad * (1 / sqrt(accum))
+var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
+END
+}
+op {
+ graph_op_name: "ResourceSparseApplyProximalGradientDescent"
+ endpoint {
+ name: "ResourceSparseApplyProximalGradientDescent"
+ }
+ summary: "Sparse update \'*var\' as FOBOS algorithm with fixed learning rate."
+ description: <<END
+That is, for the rows for which we have grad, we update var as follows:
+prox_v = var - alpha * grad
+var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
+END
+}
+op {
+ graph_op_name: "ResourceSparseApplyRMSProp"
+ endpoint {
+ name: "ResourceSparseApplyRMSProp"
+ }
+ summary: "Update \'*var\' according to the RMSProp algorithm."
+ description: <<END
+Note that in the dense implementation of this algorithm, ms and mom will
+update even if the grad is zero, but in this sparse implementation, ms
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
+
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+var <- var - mom
+END
+}
+op {
+ graph_op_name: "ResourceStridedSliceAssign"
+ endpoint {
+ name: "ResourceStridedSliceAssign"
+ }
+ summary: "Assign `value` to the sliced l-value reference of `ref`."
+ description: <<END
+The values of `value` are assigned to the positions in the variable
+`ref` that are selected by the slice parameters. The slice parameters
+`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
+
+NOTE this op currently does not support broadcasting and so `value`'s
+shape must be exactly the shape produced by the slice of `ref`.
+END
+}
+op {
+ graph_op_name: "Restore"
+ endpoint {
+ name: "Restore"
+ }
+ summary: "Restores a tensor from checkpoint files."
+ description: <<END
+Reads a tensor stored in one or several files. If there are several files (for
+instance because a tensor was saved as slices), `file_pattern` may contain
+wildcard symbols (`*` and `?`) in the filename portion only, not in the
+directory portion.
+
+If a `file_pattern` matches several files, `preferred_shard` can be used to hint
+in which file the requested tensor is likely to be found. This op will first
+open the file at index `preferred_shard` in the list of matching files and try
+to restore tensors from that file. Only if some tensors or tensor slices are
+not found in that first file does the Op open all the files. Setting
+`preferred_shard` to match the value passed as the `shard` input
+of a matching `Save` Op may speed up Restore. This attribute only affects
+performance, not correctness. The default value -1 means files are processed in
+order.
+
+See also `RestoreSlice`.
+END
+}
+op {
+ graph_op_name: "RestoreIterator"
+ endpoint {
+ name: "RestoreIterator"
+ }
+ summary: "Restores the state of the `iterator` from the checkpoint saved at `path` using \"SaveIterator\"."
+}
+op {
+ graph_op_name: "RestoreSlice"
+ endpoint {
+ name: "RestoreSlice"
+ }
+ summary: "Restores a tensor from checkpoint files."
+ description: <<END
+This is like `Restore` except that restored tensor can be listed as filling
+only a slice of a larger tensor. `shape_and_slice` specifies the shape of the
+larger tensor and the slice that the restored tensor covers.
+
+The `shape_and_slice` input has the same format as the
+elements of the `shapes_and_slices` input of the `SaveSlices` op.
+END
+}
+op {
+ graph_op_name: "RestoreV2"
+ endpoint {
+ name: "RestoreV2"
+ }
+ summary: "Restores tensors from a V2 checkpoint."
+ description: <<END
+For backward compatibility with the V1 format, this Op currently allows
+restoring from a V1 checkpoint as well:
+ - This Op first attempts to find the V2 index file pointed to by "prefix", and
+  if found, proceeds to read it as a V2 checkpoint;
+ - Otherwise the V1 read path is invoked.
+Relying on this behavior is not recommended, as the ability to fall back to read
+V1 might be deprecated and eventually removed.
+
+By default, restores the named tensors in full. If the caller wishes to restore
+specific slices of stored tensors, "shape_and_slices" should be non-empty
+strings and correspondingly well-formed.
+
+Callers must ensure all the named tensors are indeed stored in the checkpoint.
+END
+}
+op {
+ graph_op_name: "Reverse"
+ endpoint {
+ name: "Reverse"
+ }
+ summary: "Reverses specific dimensions of a tensor."
+ description: <<END
+Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
+of `tensor`, this operation reverses each dimension i of `tensor` where
+`dims[i]` is `True`.
+
+`tensor` can have up to 8 dimensions. The number of dimensions
+of `tensor` must equal the number of elements in `dims`. In other words:
+
+`rank(tensor) = size(dims)`
+
+For example:
+
+```
+# tensor 't' is [[[[ 0, 1, 2, 3],
+# [ 4, 5, 6, 7],
+# [ 8, 9, 10, 11]],
+# [[12, 13, 14, 15],
+# [16, 17, 18, 19],
+# [20, 21, 22, 23]]]]
+# tensor 't' shape is [1, 2, 3, 4]
+
+# 'dims' is [False, False, False, True]
+reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
+ [ 7, 6, 5, 4],
+ [ 11, 10, 9, 8]],
+ [[15, 14, 13, 12],
+ [19, 18, 17, 16],
+ [23, 22, 21, 20]]]]
+
+# 'dims' is [False, True, False, False]
+reverse(t, dims) ==> [[[[12, 13, 14, 15],
+ [16, 17, 18, 19],
+                        [20, 21, 22, 23]],
+ [[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]]]]
+
+# 'dims' is [False, False, True, False]
+reverse(t, dims) ==> [[[[8, 9, 10, 11],
+ [4, 5, 6, 7],
+                        [0, 1, 2, 3]],
+ [[20, 21, 22, 23],
+ [16, 17, 18, 19],
+ [12, 13, 14, 15]]]]
+```
+END
+}
+op {
+ graph_op_name: "ReverseSequence"
+ endpoint {
+ name: "ReverseSequence"
+ }
+ summary: "Reverses variable length slices."
+ description: <<END
+This op first slices `input` along the dimension `batch_dim`, and for each
+slice `i`, reverses the first `seq_lengths[i]` elements along
+the dimension `seq_dim`.
+
+The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
+and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
+
+The output slice `i` along dimension `batch_dim` is then given by input
+slice `i`, with the first `seq_lengths[i]` slices along dimension
+`seq_dim` reversed.
+
+For example:
+
+```
+# Given this:
+batch_dim = 0
+seq_dim = 1
+input.dims = (4, 8, ...)
+seq_lengths = [7, 2, 3, 5]
+
+# then slices of input are reversed on seq_dim, but only up to seq_lengths:
+output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
+output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
+output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
+output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
+
+# while entries past seq_lens are copied through:
+output[0, 7:, :, ...] = input[0, 7:, :, ...]
+output[1, 2:, :, ...] = input[1, 2:, :, ...]
+output[2, 3:, :, ...] = input[2, 3:, :, ...]
+output[3, 2:, :, ...] = input[3, 2:, :, ...]
+```
+
+In contrast, if:
+
+```
+# Given this:
+batch_dim = 2
+seq_dim = 0
+input.dims = (8, ?, 4, ...)
+seq_lengths = [7, 2, 3, 5]
+
+# then slices of input are reversed on seq_dim, but only up to seq_lengths:
+output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
+output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
+output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
+output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
+
+# while entries past seq_lens are copied through:
+output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
+output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
+output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
+output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
+```
+END
+}
+op {
+ graph_op_name: "ReverseV2"
+ endpoint {
+ name: "ReverseV2"
+ }
+ summary: "Reverses specific dimensions of a tensor."
+ description: <<END
+NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
+`tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.
+
+Given a `tensor` and an `int32` tensor `axis` representing the set of
+dimensions of `tensor` to reverse, this operation reverses each dimension
+`i` for which there exists `j` s.t. `axis[j] == i`.
+
+`tensor` can have up to 8 dimensions. `axis` may contain zero or more
+entries. If an index is specified more than once, an InvalidArgument error
+is raised.
+
+For example:
+
+```
+# tensor 't' is [[[[ 0, 1, 2, 3],
+# [ 4, 5, 6, 7],
+# [ 8, 9, 10, 11]],
+# [[12, 13, 14, 15],
+# [16, 17, 18, 19],
+# [20, 21, 22, 23]]]]
+# tensor 't' shape is [1, 2, 3, 4]
+
+# 'dims' is [3] or 'dims' is [-1]
+reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
+ [ 7, 6, 5, 4],
+ [ 11, 10, 9, 8]],
+ [[15, 14, 13, 12],
+ [19, 18, 17, 16],
+ [23, 22, 21, 20]]]]
+
+# 'dims' is '[1]' (or 'dims' is '[-3]')
+reverse(t, dims) ==> [[[[12, 13, 14, 15],
+ [16, 17, 18, 19],
+                        [20, 21, 22, 23]],
+ [[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]]]]
+
+# 'dims' is '[2]' (or 'dims' is '[-2]')
+reverse(t, dims) ==> [[[[8, 9, 10, 11],
+ [4, 5, 6, 7],
+                        [0, 1, 2, 3]],
+ [[20, 21, 22, 23],
+ [16, 17, 18, 19],
+ [12, 13, 14, 15]]]]
+```
+END
+}
+op {
+ graph_op_name: "Rint"
+ endpoint {
+ name: "Rint"
+ }
+ summary: "Returns element-wise integer closest to x."
+ description: <<END
+If the result is midway between two representable values, the even
+representable value is chosen.
+For example:
+
+```
+rint(-1.5) ==> -2.0
+rint(0.5000001) ==> 1.0
+rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
+```
+END
+}
+op {
+ graph_op_name: "Round"
+ endpoint {
+ name: "Round"
+ }
+ summary: "Rounds the values of a tensor to the nearest integer, element-wise."
+ description: <<END
+Rounds half to even, also known as banker's rounding. If you want to round
+according to the current system rounding mode, use std::rint.
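+
+For example (values chosen to show the half-to-even behavior):
+
+```
+round([0.5, 1.5, 2.5, -1.5]) ==> [0., 2., 2., -2.]
+```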
+END
+}
+op {
+ graph_op_name: "Rsqrt"
+ endpoint {
+ name: "Rsqrt"
+ }
+ summary: "Computes reciprocal of square root of x element-wise."
+ description: <<END
+I.e., \\(y = 1 / \sqrt{x}\\).
+END
+}
+op {
+ graph_op_name: "RsqrtGrad"
+ endpoint {
+ name: "RsqrtGrad"
+ }
+ summary: "Computes the gradient for the rsqrt of `x` wrt its input."
+ description: <<END
+Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
+is the corresponding input gradient.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_S.pbtxt b/tensorflow/core/api_def/base_api/api_def_S.pbtxt
new file mode 100644
index 0000000000..9c53f9ac62
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_S.pbtxt
@@ -0,0 +1,2678 @@
+op {
+ graph_op_name: "SampleDistortedBoundingBox"
+ endpoint {
+ name: "SampleDistortedBoundingBox"
+ }
+ summary: "Generate a single randomly distorted bounding box for an image."
+ description: <<END
+Bounding box annotations are often supplied in addition to ground-truth labels
+in image recognition or object localization tasks. A common technique for
+training such a system is to randomly distort an image while preserving
+its content, i.e. *data augmentation*. This Op outputs a randomly distorted
+localization of an object, i.e. bounding box, given an `image_size`,
+`bounding_boxes` and a series of constraints.
+
+The output of this Op is a single bounding box that may be used to crop the
+original image. The output is returned as 3 tensors: `begin`, `size` and
+`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
+image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
+what the bounding box looks like.
+
+Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
+bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
+height of the underlying image.
+
+For example,
+
+```python
+ # Generate a single distorted bounding box.
+ begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
+ tf.shape(image),
+ bounding_boxes=bounding_boxes)
+
+ # Draw the bounding box in an image summary.
+ image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
+ bbox_for_draw)
+ tf.image_summary('images_with_box', image_with_box)
+
+ # Employ the bounding box to distort the image.
+ distorted_image = tf.slice(image, begin, size)
+```
+
+Note that if no bounding box information is available, setting
+`use_image_if_no_bounding_boxes = true` will assume there is a single implicit
+bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
+false and no bounding boxes are supplied, an error is raised.
+END
+}
+op {
+ graph_op_name: "SampleDistortedBoundingBoxV2"
+ endpoint {
+ name: "SampleDistortedBoundingBoxV2"
+ }
+ summary: "Generate a single randomly distorted bounding box for an image."
+ description: <<END
+Bounding box annotations are often supplied in addition to ground-truth labels
+in image recognition or object localization tasks. A common technique for
+training such a system is to randomly distort an image while preserving
+its content, i.e. *data augmentation*. This Op outputs a randomly distorted
+localization of an object, i.e. bounding box, given an `image_size`,
+`bounding_boxes` and a series of constraints.
+
+The output of this Op is a single bounding box that may be used to crop the
+original image. The output is returned as 3 tensors: `begin`, `size` and
+`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
+image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
+what the bounding box looks like.
+
+Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
+bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
+height of the underlying image.
+
+For example,
+
+```python
+ # Generate a single distorted bounding box.
+ begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
+ tf.shape(image),
+ bounding_boxes=bounding_boxes)
+
+ # Draw the bounding box in an image summary.
+ image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
+ bbox_for_draw)
+ tf.image_summary('images_with_box', image_with_box)
+
+ # Employ the bounding box to distort the image.
+ distorted_image = tf.slice(image, begin, size)
+```
+
+Note that if no bounding box information is available, setting
+`use_image_if_no_bounding_boxes = true` will assume there is a single implicit
+bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
+false and no bounding boxes are supplied, an error is raised.
+END
+}
+op {
+ graph_op_name: "Save"
+ endpoint {
+ name: "Save"
+ }
+ summary: "Saves the input tensors to disk."
+ description: <<END
+The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
+is written to `filename` with name `tensor_names[i]`.
+
+See also `SaveSlices`.
+END
+}
+op {
+ graph_op_name: "SaveIterator"
+ endpoint {
+ name: "SaveIterator"
+ }
+ summary: "Saves the state of the `iterator` at `path`."
+ description: <<END
+This state can be restored using "RestoreIterator".
+END
+}
+op {
+ graph_op_name: "SaveSlices"
+ endpoint {
+ name: "SaveSlices"
+ }
+ summary: "Saves input tensors slices to disk."
+ description: <<END
+This is like `Save` except that tensors can be listed in the saved file as being
+a slice of a larger tensor. `shapes_and_slices` specifies the shape of the
+larger tensor and the slice that this tensor covers. `shapes_and_slices` must
+have as many elements as `tensor_names`.
+
+Elements of the `shapes_and_slices` input must either be:
+
+* The empty string, in which case the corresponding tensor is
+ saved normally.
+* A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
+ `dimI` are the dimensions of the larger tensor and `slice-spec`
+ specifies what part is covered by the tensor to save.
+
+`slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
+where each `sliceI` is either:
+
+* The string `-` meaning that the slice covers all indices of this dimension
+* `start,length` where `start` and `length` are integers. In that
+ case the slice covers `length` indices starting at `start`.
+
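+For example, the illustrative entry below describes a tensor covering rows
+30 through 39 and all columns of a larger `[100, 20]` tensor:
+
+```
+shapes_and_slices = "100 20 30,10:-"
+```
+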
+See also `Save`.
+END
+}
+op {
+ graph_op_name: "SaveV2"
+ endpoint {
+ name: "SaveV2"
+ }
+ summary: "Saves tensors in V2 checkpoint format."
+ description: <<END
+By default, saves the named tensors in full. If the caller wishes to save
+specific slices of full tensors, "shape_and_slices" should be non-empty strings
+and correspondingly well-formed.
+END
+}
+op {
+ graph_op_name: "ScalarSummary"
+ endpoint {
+ name: "ScalarSummary"
+ }
+ summary: "Outputs a `Summary` protocol buffer with scalar values."
+ description: <<END
+The input `tags` and `values` must have the same shape. The generated summary
+has a summary value for each tag-value pair in `tags` and `values`.
+END
+}
+op {
+ graph_op_name: "ScatterAdd"
+ endpoint {
+ name: "ScatterAdd"
+ }
+ summary: "Adds sparse updates to a variable reference."
+ description: <<END
+This operation computes
+
+```python
+    # Scalar indices
+    ref[indices, ...] += updates[...]
+
+    # Vector indices (for each i)
+    ref[indices[i], ...] += updates[i, ...]
+
+    # High rank indices (for each i, ..., j)
+    ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
+```
+
+This operation outputs `ref` after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+
+Duplicate entries are handled correctly: if multiple `indices` reference
+the same location, their contributions add.
+
+Requires `updates.shape = indices.shape + ref.shape[1:]`.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt>
+</div>
+END
+}
+op {
+ graph_op_name: "ScatterDiv"
+ endpoint {
+ name: "ScatterDiv"
+ }
+ summary: "Divides a variable reference by sparse updates."
+ description: <<END
+This operation computes
+
+```python
+ # Scalar indices
+ ref[indices, ...] /= updates[...]
+
+ # Vector indices (for each i)
+ ref[indices[i], ...] /= updates[i, ...]
+
+ # High rank indices (for each i, ..., j)
+ ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
+```
+
+This operation outputs `ref` after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+
+Duplicate entries are handled correctly: if multiple `indices` reference
+the same location, their contributions divide.
+
+Requires `updates.shape = indices.shape + ref.shape[1:]`.
+END
+}
+op {
+ graph_op_name: "ScatterMul"
+ endpoint {
+ name: "ScatterMul"
+ }
+ summary: "Multiplies sparse updates into a variable reference."
+ description: <<END
+This operation computes
+
+```python
+ # Scalar indices
+ ref[indices, ...] *= updates[...]
+
+ # Vector indices (for each i)
+ ref[indices[i], ...] *= updates[i, ...]
+
+ # High rank indices (for each i, ..., j)
+ ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
+```
+
+This operation outputs `ref` after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+
+Duplicate entries are handled correctly: if multiple `indices` reference
+the same location, their contributions multiply.
+
+Requires `updates.shape = indices.shape + ref.shape[1:]`.
+END
+}
+op {
+ graph_op_name: "ScatterNd"
+ endpoint {
+ name: "ScatterNd"
+ }
+ summary: "Scatter `updates` into a new (initially zero) tensor according to `indices`."
+ description: <<END
+Creates a new tensor by applying sparse `updates` to individual
+values or slices within a zero tensor of the given `shape` according to
+indices. This operator is the inverse of the @{tf.gather_nd} operator which
+extracts values or slices from a given tensor.
+
+**WARNING**: The order in which updates are applied is nondeterministic, so the
+output will be nondeterministic if `indices` contains duplicates.
+
+`indices` is an integer tensor containing indices into a new tensor of shape
+`shape`. The last dimension of `indices` can be at most the rank of `shape`:
+
+ indices.shape[-1] <= shape.rank
+
+The last dimension of `indices` corresponds to indices into elements
+(if `indices.shape[-1] = shape.rank`) or slices
+(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
+`shape`. `updates` is a tensor with shape
+
+ indices.shape[:-1] + shape[indices.shape[-1]:]
+
+The simplest form of scatter is to insert individual elements in a tensor by
+index. For example, say we want to insert 4 scattered elements in a rank-1
+tensor with 8 elements.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
+</div>
+
+In Python, this scatter operation would look like this:
+
+```python
+ indices = tf.constant([[4], [3], [1], [7]])
+ updates = tf.constant([9, 10, 11, 12])
+ shape = tf.constant([8])
+ scatter = tf.scatter_nd(indices, updates, shape)
+ with tf.Session() as sess:
+ print(sess.run(scatter))
+```
+
+The resulting tensor would look like this:
+
+ [0, 11, 0, 10, 9, 0, 0, 12]
+
+We can also insert entire slices of a higher rank tensor all at once. For
+example, we can insert two slices in the first dimension of a rank-3 tensor
+with two matrices of new values.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
+</div>
+
+In Python, this scatter operation would look like this:
+
+```python
+ indices = tf.constant([[0], [2]])
+ updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
+ [7, 7, 7, 7], [8, 8, 8, 8]],
+ [[5, 5, 5, 5], [6, 6, 6, 6],
+ [7, 7, 7, 7], [8, 8, 8, 8]]])
+ shape = tf.constant([4, 4, 4])
+ scatter = tf.scatter_nd(indices, updates, shape)
+ with tf.Session() as sess:
+ print(sess.run(scatter))
+```
+
+The resulting tensor would look like this:
+
+ [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+ [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
+ [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+ [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
+END
+}
+op {
+ graph_op_name: "ScatterNdAdd"
+ endpoint {
+ name: "ScatterNdAdd"
+ }
+ summary: "Applies sparse addition between `updates` and individual values or slices"
+ description: <<END
+within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to add 4 scattered elements to a rank-1 tensor with
+8 elements. In Python, that addition would look like this:
+
+ ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+ indices = tf.constant([[4], [3], [1], [7]])
+ updates = tf.constant([9, 10, 11, 12])
+ add = tf.scatter_nd_add(ref, indices, updates)
+ with tf.Session() as sess:
+    print(sess.run(add))
+
+The resulting update to ref would look like this:
+
+ [1, 13, 3, 14, 14, 6, 7, 20]
+
+See @{tf.scatter_nd} for more details about how to make updates to
+slices.
+END
+}
+op {
+ graph_op_name: "ScatterNdNonAliasingAdd"
+ endpoint {
+ name: "ScatterNdNonAliasingAdd"
+ }
+ summary: "Applies sparse addition to `input` using individual values or slices"
+ description: <<END
+from `updates` according to indices `indices`. The updates are non-aliasing:
+`input` is only modified in-place if no other operations will use it.
+Otherwise, a copy of `input` is made. This operation has a gradient with
+respect to both `input` and `updates`.
+
+`input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor containing indices into `input`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or `(P-K)`-dimensional slices
+(if `K < P`) along the `K`th dimension of `input`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].
+```
+
+For example, say we want to add 4 scattered elements to a rank-1 tensor with
+8 elements. In Python, that addition would look like this:
+
+ input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
+ indices = tf.constant([[4], [3], [1], [7]])
+ updates = tf.constant([9, 10, 11, 12])
+ output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
+ with tf.Session() as sess:
+ print(sess.run(output))
+
+The resulting value `output` would look like this:
+
+ [1, 13, 3, 14, 14, 6, 7, 20]
+
+See @{tf.scatter_nd} for more details about how to make updates to slices.
+END
+}
+op {
+ graph_op_name: "ScatterNdSub"
+ endpoint {
+ name: "ScatterNdSub"
+ }
+ summary: "Applies sparse subtraction between `updates` and individual values or slices"
+ description: <<END
+within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to subtract 4 scattered elements from a rank-1 tensor
+with 8 elements. In Python, that subtraction would look like this:
+
+ ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+ indices = tf.constant([[4], [3], [1], [7]])
+ updates = tf.constant([9, 10, 11, 12])
+ sub = tf.scatter_nd_sub(ref, indices, updates)
+ with tf.Session() as sess:
+    print(sess.run(sub))
+
+The resulting update to ref would look like this:
+
+ [1, -9, 3, -6, -4, 6, 7, -4]
+
+See @{tf.scatter_nd} for more details about how to make updates to
+slices.
+END
+}
+op {
+ graph_op_name: "ScatterNdUpdate"
+ endpoint {
+ name: "ScatterNdUpdate"
+ }
+ summary: "Applies sparse `updates` to individual values or slices within a given"
+ description: <<END
+variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to update 4 scattered elements in a rank-1 tensor
+with 8 elements. In Python, that update would look like this:
+
+```python
+ ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+ indices = tf.constant([[4], [3], [1] ,[7]])
+ updates = tf.constant([9, 10, 11, 12])
+ update = tf.scatter_nd_update(ref, indices, updates)
+ with tf.Session() as sess:
+    print(sess.run(update))
+```
+
+The resulting update to ref would look like this:
+
+ [1, 11, 3, 10, 9, 6, 7, 12]
+
+See @{tf.scatter_nd} for more details about how to make updates to
+slices.
+END
+}
+op {
+ graph_op_name: "ScatterSub"
+ endpoint {
+ name: "ScatterSub"
+ }
+ summary: "Subtracts sparse updates to a variable reference."
+ description: <<END
+```python
+ # Scalar indices
+ ref[indices, ...] -= updates[...]
+
+ # Vector indices (for each i)
+ ref[indices[i], ...] -= updates[i, ...]
+
+ # High rank indices (for each i, ..., j)
+ ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
+```
+
+This operation outputs `ref` after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+
+Duplicate entries are handled correctly: if multiple `indices` reference
+the same location, their (negated) contributions add.
+
+Requires `updates.shape = indices.shape + ref.shape[1:]`.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/ScatterSub.png" alt>
+</div>
+END
+}
+op {
+ graph_op_name: "ScatterUpdate"
+ endpoint {
+ name: "ScatterUpdate"
+ }
+ summary: "Applies sparse updates to a variable reference."
+ description: <<END
+This operation computes
+
+```python
+ # Scalar indices
+ ref[indices, ...] = updates[...]
+
+ # Vector indices (for each i)
+ ref[indices[i], ...] = updates[i, ...]
+
+ # High rank indices (for each i, ..., j)
+ ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
+```
+
+This operation outputs `ref` after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+
+If values in `ref` are to be updated more than once, because there are
+duplicate entries in `indices`, the order in which the updates happen
+for each value is undefined.
+
+Requires `updates.shape = indices.shape + ref.shape[1:]`.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/ScatterUpdate.png" alt>
+</div>
+END
+}
+op {
+ graph_op_name: "SdcaFprint"
+ endpoint {
+ name: "SdcaFprint"
+ }
+ summary: "Computes fingerprints of the input strings."
+}
+op {
+ graph_op_name: "SdcaOptimizer"
+ endpoint {
+ name: "SdcaOptimizer"
+ }
+ summary: "Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for"
+ description: <<END
+linear models with L1 + L2 regularization. As the global optimization objective
+is strongly-convex, the optimizer optimizes the dual objective at each step. The
+optimizer applies each update one example at a time. Examples are sampled
+uniformly, and the optimizer is learning-rate free and enjoys a linear
+convergence rate.
+
+[Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
+Shai Shalev-Shwartz, Tong Zhang. 2012
+
+$$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
+
+[Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
+Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
+Peter Richtarik, Martin Takac. 2015
+
+[Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
+Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
+END
+}
+op {
+ graph_op_name: "SdcaShrinkL1"
+ endpoint {
+ name: "SdcaShrinkL1"
+ }
+ summary: "Applies L1 regularization shrink step on the parameters."
+}
+op {
+ graph_op_name: "SegmentMax"
+ endpoint {
+ name: "SegmentMax"
+ }
+ summary: "Computes the maximum along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Computes a tensor such that
+\\(output_i = \max_j(data_j)\\) where `max` is over `j` such
+that `segment_ids[j] == i`.
+
+If the max is empty for a given segment ID `i`, `output[i] = 0`.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
+</div>
+END
+}
+op {
+ graph_op_name: "SegmentMean"
+ endpoint {
+ name: "SegmentMean"
+ }
+ summary: "Computes the mean along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Computes a tensor such that
+\\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is
+over `j` such that `segment_ids[j] == i` and `N` is the total number of
+values summed.
+
+If the mean is empty for a given segment ID `i`, `output[i] = 0`.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
+</div>
+END
+}
+op {
+ graph_op_name: "SegmentMin"
+ endpoint {
+ name: "SegmentMin"
+ }
+ summary: "Computes the minimum along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Computes a tensor such that
+\\(output_i = \min_j(data_j)\\) where `min` is over `j` such
+that `segment_ids[j] == i`.
+
+If the min is empty for a given segment ID `i`, `output[i] = 0`.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
+</div>
+END
+}
+op {
+ graph_op_name: "SegmentProd"
+ endpoint {
+ name: "SegmentProd"
+ }
+ summary: "Computes the product along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Computes a tensor such that
+\\(output_i = \prod_j data_j\\) where the product is over `j` such
+that `segment_ids[j] == i`.
+
+If the product is empty for a given segment ID `i`, `output[i] = 1`.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
+</div>
+END
+}
+op {
+ graph_op_name: "SegmentSum"
+ endpoint {
+ name: "SegmentSum"
+ }
+ summary: "Computes the sum along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Computes a tensor such that
+\\(output_i = \sum_j data_j\\) where sum is over `j` such
+that `segment_ids[j] == i`.
+
+If the sum is empty for a given segment ID `i`, `output[i] = 0`.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
+</div>
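+
+For example (a small illustrative case):
+
+```
+# 'data' is [[1, 2, 3, 4], [5, 6, 7, 8], [4, 3, 2, 1]]
+# 'segment_ids' is [0, 0, 1]
+segment_sum(data, segment_ids) ==> [[6, 8, 10, 12],
+                                    [4, 3, 2, 1]]
+```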
+END
+}
+op {
+ graph_op_name: "Select"
+ endpoint {
+ name: "Select"
+ }
+ summary: "Selects elements from `t` or `e`, depending on `condition`."
+ description: <<END
+The `t` and `e` tensors must all have the same shape, and the
+output will also have that shape.
+
+The `condition` tensor must be a scalar if `t` and `e` are scalars.
+If `t` and `e` are vectors or higher rank, then `condition` must be either a
+scalar, a vector with size matching the first dimension of `t`, or must have
+the same shape as `t`.
+
+The `condition` tensor acts as a mask that chooses, based on the value at each
+element, whether the corresponding element / row in the output should be
+taken from `t` (if true) or `e` (if false).
+
+If `condition` is a vector and `t` and `e` are higher rank matrices, then
+it chooses which row (outer dimension) to copy from `t` and `e`.
+If `condition` has the same shape as `t` and `e`, then it chooses which
+element to copy from `t` and `e`.
+
+For example:
+
+```python
+# 'condition' tensor is [[True, False]
+# [False, True]]
+# 't' is [[1, 2],
+# [3, 4]]
+# 'e' is [[5, 6],
+# [7, 8]]
+select(condition, t, e) # => [[1, 6], [7, 4]]
+
+
+# 'condition' tensor is [True, False]
+# 't' is [[1, 2],
+# [3, 4]]
+# 'e' is [[5, 6],
+# [7, 8]]
+select(condition, t, e) ==> [[1, 2],
+ [7, 8]]
+
+```
+END
+}
+op {
+ graph_op_name: "SelfAdjointEig"
+ endpoint {
+ name: "SelfAdjointEig"
+ }
+ summary: "Computes the Eigen Decomposition of a batch of square self-adjoint matrices."
+ description: <<END
+The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+form square matrices, with the same constraints as the single matrix
+SelfAdjointEig.
+
+The result is a [..., M+1, M] matrix with [..., 0,:] containing the
+eigenvalues, and subsequent [...,1:, :] containing the eigenvectors.
+END
+}
+op {
+ graph_op_name: "SelfAdjointEigV2"
+ endpoint {
+ name: "SelfAdjointEigV2"
+ }
+ summary: "Computes the eigen decomposition of one or more square self-adjoint matrices."
+ description: <<END
+Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
+`input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.
+
+```python
+# a is a tensor.
+# e is a tensor of eigenvalues.
+# v is a tensor of eigenvectors.
+e, v = self_adjoint_eig(a)
+e = self_adjoint_eig(a, compute_v=False)
+```
+END
+}
+op {
+ graph_op_name: "Selu"
+ endpoint {
+ name: "Selu"
+ }
+ summary: "Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`"
+ description: <<END
+if < 0, `scale * features` otherwise.
+
+See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
+END
+}
+op {
+ graph_op_name: "SeluGrad"
+ endpoint {
+ name: "SeluGrad"
+ }
+ summary: "Computes gradients for the scaled exponential linear (Selu) operation."
+}
+op {
+ graph_op_name: "SerializeManySparse"
+ endpoint {
+ name: "SerializeManySparse"
+ }
+ summary: "Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`."
+ description: <<END
+The `SparseTensor` must have rank `R` greater than 1, and the first dimension
+is treated as the minibatch dimension. Elements of the `SparseTensor`
+must be sorted in increasing order of this first dimension. The serialized
+`SparseTensor` objects going into each row of `serialized_sparse` will have
+rank `R-1`.
+
+The minibatch size `N` is extracted from `sparse_shape[0]`.
+END
+}
+op {
+ graph_op_name: "SerializeSparse"
+ endpoint {
+ name: "SerializeSparse"
+ }
+ summary: "Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object."
+}
+op {
+ graph_op_name: "SerializeTensor"
+ endpoint {
+ name: "SerializeTensor"
+ }
+ summary: "Transforms a Tensor into a serialized TensorProto proto."
+}
+op {
+ graph_op_name: "SetSize"
+ endpoint {
+ name: "SetSize"
+ }
+ summary: "Number of unique elements along last dimension of input `set`."
+ description: <<END
+Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
+and `set_shape`. The last dimension contains values in a set; duplicates are
+allowed but ignored.
+
+If `validate_indices` is `True`, this op validates the order and range of `set`
+indices.
+END
+}
+op {
+ graph_op_name: "Shape"
+ endpoint {
+ name: "Shape"
+ }
+ summary: "Returns the shape of a tensor."
+ description: <<END
+This operation returns a 1-D integer tensor representing the shape of `input`.
+
+For example:
+
+```
+# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+shape(t) ==> [2, 2, 3]
+```
+END
+}
+op {
+ graph_op_name: "ShapeN"
+ endpoint {
+ name: "ShapeN"
+ }
+ summary: "Returns shape of tensors."
+ description: <<END
+This operation returns N 1-D integer tensors representing the shape of each `input[i]`.
+END
+}
+op {
+ graph_op_name: "ShardedFilename"
+ endpoint {
+ name: "ShardedFilename"
+ }
+ summary: "Generate a sharded filename. The filename is printf formatted as"
+ description: <<END
+ %s-%05d-of-%05d, basename, shard, num_shards.
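+
+For example (illustrative values), basename "ckpt" with shard 3 of 10 shards
+yields:
+
+```
+ckpt-00003-of-00010
+```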
+END
+}
+op {
+ graph_op_name: "ShardedFilespec"
+ endpoint {
+ name: "ShardedFilespec"
+ }
+ summary: "Generate a glob pattern matching all sharded file names."
+}
+op {
+ graph_op_name: "ShuffleDataset"
+ endpoint {
+ name: "ShuffleDataset"
+ }
+ summary: "Creates a dataset that shuffles elements from `input_dataset` pseudorandomly."
+}
+op {
+ graph_op_name: "Sigmoid"
+ endpoint {
+ name: "Sigmoid"
+ }
+ summary: "Computes sigmoid of `x` element-wise."
+ description: <<END
+Specifically, `y = 1 / (1 + exp(-x))`.
+END
+}
+op {
+ graph_op_name: "SigmoidGrad"
+ endpoint {
+ name: "SigmoidGrad"
+ }
+ summary: "Computes the gradient of the sigmoid of `x` wrt its input."
+ description: <<END
+Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
+`dy` is the corresponding input gradient.
+END
+}
+op {
+ graph_op_name: "Sign"
+ endpoint {
+ name: "Sign"
+ }
+ summary: "Returns an element-wise indication of the sign of a number."
+ description: <<END
+`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
+
+For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
+END
+}
+op {
+ graph_op_name: "Sin"
+ endpoint {
+ name: "Sin"
+ }
+ summary: "Computes sin of x element-wise."
+}
+op {
+ graph_op_name: "Sinh"
+ endpoint {
+ name: "Sinh"
+ }
+ summary: "Computes hyperbolic sine of x element-wise."
+}
+op {
+ graph_op_name: "Size"
+ endpoint {
+ name: "Size"
+ }
+ summary: "Returns the size of a tensor."
+ description: <<END
+This operation returns an integer representing the number of elements in
+`input`.
+
+For example:
+
+```
+# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+size(t) ==> 12
+```
+END
+}
+op {
+ graph_op_name: "SkipDataset"
+ endpoint {
+ name: "SkipDataset"
+ }
+ summary: "Creates a dataset that skips `count` elements from the `input_dataset`."
+}
+op {
+ graph_op_name: "Skipgram"
+ endpoint {
+ name: "Skipgram"
+ }
+ summary: "Parses a text file and creates a batch of examples."
+}
+op {
+ graph_op_name: "Slice"
+ endpoint {
+ name: "Slice"
+ }
+ summary: "Return a slice from \'input\'."
+ description: <<END
+The output tensor is a tensor with dimensions described by 'size'
+whose values are extracted from 'input' starting at the offsets in
+'begin'.
+
+*Requirements*:
+ 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)
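+
+For example:
+
+```
+# 't' is [[1, 2, 3], [4, 5, 6]]
+slice(t, begin=[0, 1], size=[2, 2]) ==> [[2, 3],
+                                         [5, 6]]
+```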
+END
+}
+op {
+ graph_op_name: "SloppyInterleaveDataset"
+ endpoint {
+ name: "SloppyInterleaveDataset"
+ }
+ summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
+ description: <<END
+The resulting dataset is similar to the `InterleaveDataset`, with the exception
+that if retrieving the next value from a dataset would cause the requester to
+block, it will skip that input dataset. This dataset is especially useful
+when loading data from variable-latency datastores (e.g. HDFS, GCS), as it
+allows the training step to proceed so long as some data is available.
+
+!! WARNING !! This dataset is not deterministic!
+END
+}
+op {
+ graph_op_name: "Softmax"
+ endpoint {
+ name: "Softmax"
+ }
+ summary: "Computes softmax activations."
+ description: <<END
+For each batch `i` and class `j` we have
+
+ softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
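+
+A minimal NumPy sketch of the same computation (illustrative only):
+
+```python
+import numpy as np
+
+def softmax(logits):
+  # Subtracting the row-wise max keeps exp() numerically stable; the result
+  # equals the formula above.
+  z = np.exp(logits - logits.max(axis=-1, keepdims=True))
+  return z / z.sum(axis=-1, keepdims=True)
+```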
+END
+}
+op {
+ graph_op_name: "SoftmaxCrossEntropyWithLogits"
+ endpoint {
+ name: "SoftmaxCrossEntropyWithLogits"
+ }
+ summary: "Computes softmax cross entropy cost and gradients to backpropagate."
+ description: <<END
+Inputs are the logits, not probabilities.
+END
+}
+op {
+ graph_op_name: "Softplus"
+ endpoint {
+ name: "Softplus"
+ }
+ summary: "Computes softplus: `log(exp(features) + 1)`."
+}
+op {
+ graph_op_name: "SoftplusGrad"
+ endpoint {
+ name: "SoftplusGrad"
+ }
+ summary: "Computes softplus gradients for a softplus operation."
+}
+op {
+ graph_op_name: "Softsign"
+ endpoint {
+ name: "Softsign"
+ }
+ summary: "Computes softsign: `features / (abs(features) + 1)`."
+}
+op {
+ graph_op_name: "SoftsignGrad"
+ endpoint {
+ name: "SoftsignGrad"
+ }
+ summary: "Computes softsign gradients for a softsign operation."
+}
+op {
+ graph_op_name: "SpaceToBatch"
+ endpoint {
+ name: "SpaceToBatch"
+ }
+ summary: "SpaceToBatch for 4-D tensors of type T."
+ description: <<END
+This is a legacy version of the more general SpaceToBatchND.
+
+Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
+More specifically, this op outputs a copy of the input tensor where values from
+the `height` and `width` dimensions are moved to the `batch` dimension. After
+the zero-padding, both `height` and `width` of the input must be divisible by the
+block size.
+END
+}
+op {
+ graph_op_name: "SpaceToBatchND"
+ endpoint {
+ name: "SpaceToBatchND"
+ }
+ summary: "SpaceToBatch for N-D tensors of type T."
+ description: <<END
+This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
+grid of blocks of shape `block_shape`, and interleaves these blocks with the
+"batch" dimension (0) such that in the output, the spatial dimensions
+`[1, ..., M]` correspond to the position within the grid, and the batch
+dimension combines both the position within a spatial block and the original
+batch position. Prior to division into blocks, the spatial dimensions of the
+input are optionally zero padded according to `paddings`. See below for a
+precise description.
+END
+}
+op {
+ graph_op_name: "SpaceToDepth"
+ endpoint {
+ name: "SpaceToDepth"
+ }
+ summary: "SpaceToDepth for tensors of type T."
+ description: <<END
+Rearranges blocks of spatial data into depth. More specifically,
+this op outputs a copy of the input tensor where values from the `height`
+and `width` dimensions are moved to the `depth` dimension.
+The attr `block_size` indicates the input block size.
+
+  * Non-overlapping blocks of size `block_size x block_size` are rearranged
+ into depth at each location.
+ * The depth of the output tensor is `block_size * block_size * input_depth`.
+ * The Y, X coordinates within each block of the input become the high order
+ component of the output channel index.
+ * The input tensor's height and width must be divisible by block_size.
+
+The `data_format` attr specifies the layout of the input and output tensors
+with the following options:
+ "NHWC": `[ batch, height, width, channels ]`
+ "NCHW": `[ batch, channels, height, width ]`
+ "NCHW_VECT_C":
+ `qint8 [ batch, channels / 4, height, width, channels % 4 ]`
+
+It is useful to consider the operation as transforming a 6-D Tensor.
+e.g. for data_format = NHWC,
+ Each element in the input tensor can be specified via 6 coordinates,
+ ordered by decreasing memory layout significance as:
+ n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates
+ within the output image, bX, bY means coordinates
+ within the input block, iC means input channels).
+ The output would be a transpose to the following layout:
+ n,oY,oX,bY,bX,iC
+
+This operation is useful for resizing the activations between convolutions
+(but keeping all data), e.g. instead of pooling. It is also useful for training
+purely convolutional models.
+
+For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
+block_size = 2:
+
+```
+x = [[[[1], [2]],
+ [[3], [4]]]]
+```
+
+This operation will output a tensor of shape `[1, 1, 1, 4]`:
+
+```
+[[[[1, 2, 3, 4]]]]
+```
+
+Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`.
+The corresponding output will have a single element (i.e. width and height are
+both 1) and will have a depth of 4 channels (1 * block_size * block_size).
+The output element shape is `[1, 1, 4]`.
+
+For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
+
+```
+x = [[[[1, 2, 3], [4, 5, 6]],
+ [[7, 8, 9], [10, 11, 12]]]]
+```
+
+This operation, for block_size of 2, will return the following tensor of shape
+`[1, 1, 1, 12]`
+
+```
+[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
+```
+
+Similarly, for the following input of shape `[1, 4, 4, 1]`, and a block size of 2:
+
+```
+x = [[[[1], [2], [5], [6]],
+ [[3], [4], [7], [8]],
+ [[9], [10], [13], [14]],
+ [[11], [12], [15], [16]]]]
+```
+
+the operator will return the following tensor of shape `[1, 2, 2, 4]`:
+
+```
+x = [[[[1, 2, 3, 4],
+ [5, 6, 7, 8]],
+ [[9, 10, 11, 12],
+ [13, 14, 15, 16]]]]
+```
+END
+}
+op {
+ graph_op_name: "SparseAccumulatorApplyGradient"
+ endpoint {
+ name: "SparseAccumulatorApplyGradient"
+ }
+ summary: "Applies a sparse gradient to a given accumulator."
+ description: <<END
+Does not add if local_step is smaller than the accumulator's
+global_step.
+END
+}
+op {
+ graph_op_name: "SparseAccumulatorTakeGradient"
+ endpoint {
+ name: "SparseAccumulatorTakeGradient"
+ }
+ summary: "Extracts the average sparse gradient in a SparseConditionalAccumulator."
+ description: <<END
+The op blocks until sufficient (i.e., more than num_required)
+gradients have been accumulated. If the accumulator has already
+aggregated more than num_required gradients, it returns the
+average of the accumulated gradients. Also automatically increments
+the recorded global_step in the accumulator by 1, and resets the
+aggregate to 0.
+END
+}
+op {
+ graph_op_name: "SparseAdd"
+ endpoint {
+ name: "SparseAdd"
+ }
+ summary: "Adds two `SparseTensor` objects to produce another `SparseTensor`."
+ description: <<END
+The input `SparseTensor` objects' indices are assumed ordered in standard
+lexicographic order. If this is not the case, before this step run
+`SparseReorder` to restore index ordering.
+
+By default, if two values sum to zero at some index, the output `SparseTensor`
+would still include that particular location in its index, storing a zero in the
+corresponding value slot. To override this, callers can specify `thresh`,
+indicating that if the sum has a magnitude strictly smaller than `thresh`, its
+corresponding value and index would then not be included. In particular,
+`thresh == 0` (default) means everything is kept and actual thresholding happens
+only for a positive value.
+
+In the following shapes, `nnz` is the count after taking `thresh` into account.
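+
+As a small numpy illustration of the `thresh` rule (hypothetical values, not
+the op's implementation):
+
+```python
+import numpy as np
+
+summed_values = np.array([0.1, -0.2, 3.0])  # sums at the overlapping indices
+thresh = 0.21
+kept = summed_values[np.abs(summed_values) >= thresh]
+# kept == [3.0]; with the default thresh = 0 every location is kept.
+```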
+END
+}
+op {
+ graph_op_name: "SparseAddGrad"
+ endpoint {
+ name: "SparseAddGrad"
+ }
+ summary: "The gradient operator for the SparseAdd op."
+ description: <<END
+The SparseAdd op calculates A + B, where A, B, and the sum are all represented
+as `SparseTensor` objects. This op takes in the upstream gradient w.r.t.
+non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
+values of A and B.
+END
+}
+op {
+ graph_op_name: "SparseApplyAdadelta"
+ endpoint {
+ name: "SparseApplyAdadelta"
+ }
+  summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adadelta scheme."
+}
+op {
+ graph_op_name: "SparseApplyAdagrad"
+ endpoint {
+ name: "SparseApplyAdagrad"
+ }
+ summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme."
+ description: <<END
+That is, for rows for which we have grad, we update var and accum as follows:
+accum += grad * grad
+var -= lr * grad * (1 / sqrt(accum))
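+
+A minimal numpy sketch of this per-row update (illustrative only; the names
+below are hypothetical):
+
+```python
+import numpy as np
+
+var = np.ones((4, 2))
+accum = np.full((4, 2), 0.1)
+lr = 0.01
+indices = np.array([0, 2])                  # rows for which we have grad
+grad = np.array([[0.5, 0.5], [1.0, 1.0]])
+
+accum[indices] += grad * grad
+var[indices] -= lr * grad / np.sqrt(accum[indices])
+```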
+END
+}
+op {
+ graph_op_name: "SparseApplyAdagradDA"
+ endpoint {
+ name: "SparseApplyAdagradDA"
+ }
+ summary: "Update entries in \'*var\' and \'*accum\' according to the proximal adagrad scheme."
+}
+op {
+ graph_op_name: "SparseApplyCenteredRMSProp"
+ endpoint {
+ name: "SparseApplyCenteredRMSProp"
+ }
+ summary: "Update \'*var\' according to the centered RMSProp algorithm."
+ description: <<END
+The centered RMSProp algorithm uses an estimate of the centered second moment
+(i.e., the variance) for normalization, as opposed to regular RMSProp, which
+uses the (uncentered) second moment. This often helps with training, but is
+slightly more expensive in terms of computation and memory.
+
+Note that in the dense implementation of this algorithm, mg, ms, and mom will
+update even if the grad is zero, but in this sparse implementation, mg, ms,
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+mean_grad = decay * mean_grad + (1-decay) * gradient
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
+
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+var <- var - mom
+END
+}
+op {
+ graph_op_name: "SparseApplyFtrl"
+ endpoint {
+ name: "SparseApplyFtrl"
+ }
+ summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+That is, for rows for which we have grad, we update var, accum and linear as follows:
+accum_new = accum + grad * grad
+linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
+op {
+ graph_op_name: "SparseApplyFtrlV2"
+ endpoint {
+ name: "SparseApplyFtrlV2"
+ }
+ summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+That is, for rows for which we have grad, we update var, accum and linear as follows:
+grad_with_shrinkage = grad + 2 * l2_shrinkage * var
+accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
+linear += grad_with_shrinkage +
+ (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
+op {
+ graph_op_name: "SparseApplyMomentum"
+ endpoint {
+ name: "SparseApplyMomentum"
+ }
+ summary: "Update relevant entries in \'*var\' and \'*accum\' according to the momentum scheme."
+ description: <<END
+Set use_nesterov = True if you want to use Nesterov momentum.
+
+That is, for rows for which we have grad, we update var and accum as follows:
+
+accum = accum * momentum + grad
+var -= lr * accum
+END
+}
+op {
+ graph_op_name: "SparseApplyProximalAdagrad"
+ endpoint {
+ name: "SparseApplyProximalAdagrad"
+ }
+  summary: "Sparse update entries in \'*var\' and \'*accum\' according to the FOBOS algorithm."
+ description: <<END
+That is, for rows for which we have grad, we update var and accum as follows:
+accum += grad * grad
+prox_v = var
+prox_v -= lr * grad * (1 / sqrt(accum))
+var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
+END
+}
+op {
+ graph_op_name: "SparseApplyProximalGradientDescent"
+ endpoint {
+ name: "SparseApplyProximalGradientDescent"
+ }
+ summary: "Sparse update \'*var\' as FOBOS algorithm with fixed learning rate."
+ description: <<END
+That is, for rows for which we have grad, we update var as follows:
+prox_v = var - alpha * grad
+var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
+END
+}
+op {
+ graph_op_name: "SparseApplyRMSProp"
+ endpoint {
+ name: "SparseApplyRMSProp"
+ }
+ summary: "Update \'*var\' according to the RMSProp algorithm."
+ description: <<END
+Note that in the dense implementation of this algorithm, ms and mom will
+update even if the grad is zero, but in this sparse implementation, ms
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
+
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+var <- var - mom
+END
+}
+op {
+ graph_op_name: "SparseConcat"
+ endpoint {
+ name: "SparseConcat"
+ }
+ summary: "Concatenates a list of `SparseTensor` along the specified dimension."
+ description: <<END
+Concatenation is with respect to the dense versions of these sparse tensors.
+It is assumed that each input is a `SparseTensor` whose elements are ordered
+along increasing dimension number.
+
+All inputs' shapes must match, except for the concat dimension. The
+`indices`, `values`, and `shapes` lists must have the same length.
+
+The output shape is identical to the inputs', except along the concat
+dimension, where it is the sum of the inputs' sizes along that dimension.
+
+The output elements will be re-sorted to preserve the sort order along
+increasing dimension number.
+
+This op runs in `O(M log M)` time, where `M` is the total number of non-empty
+values across all inputs. This is due to the need for an internal sort in
+order to concatenate efficiently across an arbitrary dimension.
+
+For example, if `concat_dim = 1` and the inputs are
+
+ sp_inputs[0]: shape = [2, 3]
+ [0, 2]: "a"
+ [1, 0]: "b"
+ [1, 1]: "c"
+
+ sp_inputs[1]: shape = [2, 4]
+ [0, 1]: "d"
+ [0, 2]: "e"
+
+then the output will be
+
+ shape = [2, 7]
+ [0, 2]: "a"
+ [0, 4]: "d"
+ [0, 5]: "e"
+ [1, 0]: "b"
+ [1, 1]: "c"
+
+Graphically this is equivalent to doing
+
+ [ a] concat [ d e ] = [ a d e ]
+ [b c ] [ ] [b c ]
+END
+}
+op {
+ graph_op_name: "SparseConditionalAccumulator"
+ endpoint {
+ name: "SparseConditionalAccumulator"
+ }
+ summary: "A conditional accumulator for aggregating sparse gradients."
+ description: <<END
+The accumulator accepts gradients marked with local_step greater or
+equal to the most recent global_step known to the accumulator. The
+average can be extracted from the accumulator, provided sufficient
+gradients have been accumulated. Extracting the average automatically
+resets the aggregate to 0, and increments the global_step recorded by
+the accumulator.
+END
+}
+op {
+ graph_op_name: "SparseCross"
+ endpoint {
+ name: "SparseCross"
+ }
+ summary: "Generates sparse cross from a list of sparse and dense tensors."
+ description: <<END
+The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
+representing features of one feature column. It outputs a 2D `SparseTensor` with
+the batchwise crosses of these features.
+
+For example, if the inputs are
+
+ inputs[0]: SparseTensor with shape = [2, 2]
+ [0, 0]: "a"
+ [1, 0]: "b"
+ [1, 1]: "c"
+
+ inputs[1]: SparseTensor with shape = [2, 1]
+ [0, 0]: "d"
+ [1, 0]: "e"
+
+ inputs[2]: Tensor [["f"], ["g"]]
+
+then the output will be
+
+ shape = [2, 2]
+ [0, 0]: "a_X_d_X_f"
+ [1, 0]: "b_X_e_X_g"
+ [1, 1]: "c_X_e_X_g"
+
+if hashed_output=true then the output will be
+
+ shape = [2, 2]
+ [0, 0]: FingerprintCat64(
+ Fingerprint64("f"), FingerprintCat64(
+ Fingerprint64("d"), Fingerprint64("a")))
+ [1, 0]: FingerprintCat64(
+ Fingerprint64("g"), FingerprintCat64(
+ Fingerprint64("e"), Fingerprint64("b")))
+ [1, 1]: FingerprintCat64(
+ Fingerprint64("g"), FingerprintCat64(
+ Fingerprint64("e"), Fingerprint64("c")))
+END
+}
+op {
+ graph_op_name: "SparseDenseCwiseAdd"
+ endpoint {
+ name: "SparseDenseCwiseAdd"
+ }
+ summary: "Adds up a SparseTensor and a dense Tensor, using these special rules:"
+ description: <<END
+(1) Broadcasts the dense side to have the same shape as the sparse side, if
+ eligible;
+(2) Then, only the dense values pointed to by the indices of the SparseTensor
+ participate in the cwise addition.
+
+By these rules, the result is a logical SparseTensor with exactly the same
+indices and shape, but possibly with different non-zero values. The output of
+this Op is the resultant non-zero values.
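+
+A small numpy sketch of these two rules (hypothetical names; not the kernel
+implementation):
+
+```python
+import numpy as np
+
+# SparseTensor with dense_shape [2, 3]
+sp_indices = np.array([[0, 1], [1, 2]])
+sp_values = np.array([10.0, 20.0])
+dense = np.arange(6, dtype=np.float64).reshape(2, 3)
+
+# Only the dense entries addressed by sp_indices take part in the addition.
+out_values = sp_values + dense[sp_indices[:, 0], sp_indices[:, 1]]
+# out_values == [11.0, 25.0]; indices and shape are unchanged.
+```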
+END
+}
+op {
+ graph_op_name: "SparseDenseCwiseDiv"
+ endpoint {
+ name: "SparseDenseCwiseDiv"
+ }
+ summary: "Component-wise divides a SparseTensor by a dense Tensor."
+ description: <<END
+*Limitation*: this Op only broadcasts the dense side to the sparse side, but not
+the other direction.
+END
+}
+op {
+ graph_op_name: "SparseDenseCwiseMul"
+ endpoint {
+ name: "SparseDenseCwiseMul"
+ }
+ summary: "Component-wise multiplies a SparseTensor by a dense Tensor."
+ description: <<END
+The output locations corresponding to the implicitly zero elements in the sparse
+tensor will be zero (i.e., will not take up storage space), regardless of the
+contents of the dense tensor (even if it is +/-INF, noting that INF*0 == NaN).
+
+*Limitation*: this Op only broadcasts the dense side to the sparse side, but not
+the other direction.
+END
+}
+op {
+ graph_op_name: "SparseFillEmptyRows"
+ endpoint {
+ name: "SparseFillEmptyRows"
+ }
+ summary: "Fills empty rows in the input 2-D `SparseTensor` with a default value."
+ description: <<END
+The input `SparseTensor` is represented via the tuple of inputs
+(`indices`, `values`, `dense_shape`). The output `SparseTensor` has the
+same `dense_shape` but with indices `output_indices` and values
+`output_values`.
+
+This op inserts a single entry for every row that doesn't have any values.
+The index is created as `[row, 0, ..., 0]` and the inserted value
+is `default_value`.
+
+For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
+
+ [0, 1]: a
+ [0, 3]: b
+ [2, 0]: c
+ [3, 1]: d
+
+Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
+
+ [0, 1]: a
+ [0, 3]: b
+ [1, 0]: default_value
+ [2, 0]: c
+ [3, 1]: d
+ [4, 0]: default_value
+
+The output `SparseTensor` will be in row-major order and will have the
+same shape as the input.
+
+This op also returns an indicator vector shaped `[dense_shape[0]]` such that
+
+ empty_row_indicator[i] = True iff row i was an empty row.
+
+And a reverse index map vector shaped `[indices.shape[0]]` that is used during
+backpropagation,
+
+ reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
+END
+}
+op {
+ graph_op_name: "SparseFillEmptyRowsGrad"
+ endpoint {
+ name: "SparseFillEmptyRowsGrad"
+ }
+ summary: "The gradient of SparseFillEmptyRows."
+ description: <<END
+Takes vectors reverse_index_map, shaped `[N]`, and grad_values,
+shaped `[N_full]`, where `N_full >= N` and copies data into either
+`d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and
+`d_default_value` is a scalar.
+
+ d_values[j] = grad_values[reverse_index_map[j]]
+ d_default_value = sum_{k : 0 .. N_full - 1} (
+ grad_values[k] * 1{k not in reverse_index_map})
+END
+}
+op {
+ graph_op_name: "SparseMatMul"
+ endpoint {
+ name: "SparseMatMul"
+ }
+ summary: "Multiply matrix \"a\" by matrix \"b\"."
+ description: <<END
+The inputs must be two-dimensional matrices and the inner dimension of "a" must
+match the outer dimension of "b". This op is optimized for the case where at
+least one of "a" or "b" is sparse. The breakeven for using this versus a dense
+matrix multiply on one platform was 30% zero values in the sparse matrix.
+
+The gradient computation of this operation will only take advantage of sparsity
+in the input gradient when that gradient comes from a Relu.
+END
+}
+op {
+ graph_op_name: "SparseReduceMax"
+ endpoint {
+ name: "SparseReduceMax"
+ }
+ summary: "Computes the max of elements across dimensions of a SparseTensor."
+ description: <<END
+This Op takes a SparseTensor and is the sparse counterpart to
+`tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`
+instead of a sparse one.
+
+Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
+with length 1.
+
+If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
+with a single element is returned. Additionally, the axes can be negative,
+which are interpreted according to the indexing rules in Python.
+END
+}
+op {
+ graph_op_name: "SparseReduceMaxSparse"
+ endpoint {
+ name: "SparseReduceMaxSparse"
+ }
+ summary: "Computes the max of elements across dimensions of a SparseTensor."
+ description: <<END
+This Op takes a SparseTensor and is the sparse counterpart to
+`tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a
+SparseTensor.
+
+Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
+with length 1.
+
+If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
+with a single element is returned. Additionally, the axes can be negative,
+which are interpreted according to the indexing rules in Python.
+END
+}
+op {
+ graph_op_name: "SparseReduceSum"
+ endpoint {
+ name: "SparseReduceSum"
+ }
+ summary: "Computes the sum of elements across dimensions of a SparseTensor."
+ description: <<END
+This Op takes a SparseTensor and is the sparse counterpart to
+`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
+instead of a sparse one.
+
+Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
+with length 1.
+
+If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
+with a single element is returned. Additionally, the axes can be negative,
+which are interpreted according to the indexing rules in Python.
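+
+For example, assuming the Python wrapper `tf.sparse_reduce_sum` (a hedged
+usage sketch):
+
+```python
+# 'x' represents [[1, ?, 1]
+#                 [?, 1, ?]], where ? is an implicit zero.
+tf.sparse_reduce_sum(x)                          # => 3
+tf.sparse_reduce_sum(x, axis=1)                  # => [2, 1]
+tf.sparse_reduce_sum(x, axis=1, keep_dims=True)  # => [[2], [1]]
+tf.sparse_reduce_sum(x, axis=[0, 1])             # => 3
+```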
+END
+}
+op {
+ graph_op_name: "SparseReduceSumSparse"
+ endpoint {
+ name: "SparseReduceSumSparse"
+ }
+ summary: "Computes the sum of elements across dimensions of a SparseTensor."
+ description: <<END
+This Op takes a SparseTensor and is the sparse counterpart to
+`tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
+SparseTensor.
+
+Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
+with length 1.
+
+If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
+with a single element is returned. Additionally, the axes can be negative,
+which are interpreted according to the indexing rules in Python.
+END
+}
+op {
+ graph_op_name: "SparseReorder"
+ endpoint {
+ name: "SparseReorder"
+ }
+ summary: "Reorders a SparseTensor into the canonical, row-major ordering."
+ description: <<END
+Note that by convention, all sparse ops preserve the canonical ordering along
+increasing dimension number. The only time ordering can be violated is during
+manual manipulation of the indices and values vectors to add entries.
+
+Reordering does not affect the shape of the SparseTensor.
+
+If the tensor has rank `R` and `N` non-empty values, `input_indices` has
+shape `[N, R]`, input_values has length `N`, and input_shape has length `R`.
+END
+}
+op {
+ graph_op_name: "SparseReshape"
+ endpoint {
+ name: "SparseReshape"
+ }
+ summary: "Reshapes a SparseTensor to represent values in a new dense shape."
+ description: <<END
+This operation has the same semantics as reshape on the represented dense
+tensor. The `input_indices` are recomputed based on the requested `new_shape`.
+
+If one component of `new_shape` is the special value -1, the size of that
+dimension is computed so that the total dense size remains constant. At
+most one component of `new_shape` can be -1. The number of dense elements
+implied by `new_shape` must be the same as the number of dense elements
+originally implied by `input_shape`.
+
+Reshaping does not affect the order of values in the SparseTensor.
+
+If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
+has length `R_out`, then `input_indices` has shape `[N, R_in]`,
+`input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
+`output_shape` has length `R_out`.
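+
+For example (an illustrative sketch of the index recomputation):
+
+```
+# input_shape  = [2, 3],  input_indices  = [[0, 2], [1, 1]]
+# new_shape    = [3, -1]  (the -1 resolves to 2)
+# output_shape = [3, 2],  output_indices = [[1, 0], [2, 0]]
+```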
+END
+}
+op {
+ graph_op_name: "SparseSegmentMean"
+ endpoint {
+ name: "SparseSegmentMean"
+ }
+ summary: "Computes the mean along sparse segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
+dimension, selecting a subset of dimension 0, specified by `indices`.
+END
+}
+op {
+ graph_op_name: "SparseSegmentMeanGrad"
+ endpoint {
+ name: "SparseSegmentMeanGrad"
+ }
+ summary: "Computes gradients for SparseSegmentMean."
+ description: <<END
+Returns tensor "output" with same shape as grad, except for dimension 0 whose
+value is output_dim0.
+END
+}
+op {
+ graph_op_name: "SparseSegmentSqrtN"
+ endpoint {
+ name: "SparseSegmentSqrtN"
+ }
+ summary: "Computes the sum along sparse segments of a tensor divided by the sqrt of N."
+ description: <<END
+N is the size of the segment being reduced.
+
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+END
+}
+op {
+ graph_op_name: "SparseSegmentSqrtNGrad"
+ endpoint {
+ name: "SparseSegmentSqrtNGrad"
+ }
+ summary: "Computes gradients for SparseSegmentSqrtN."
+ description: <<END
+Returns tensor "output" with same shape as grad, except for dimension 0 whose
+value is output_dim0.
+END
+}
+op {
+ graph_op_name: "SparseSegmentSum"
+ endpoint {
+ name: "SparseSegmentSum"
+ }
+ summary: "Computes the sum along sparse segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
+dimension, selecting a subset of dimension 0, specified by `indices`.
+
+For example:
+
+```python
+c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
+
+# Select two rows, one segment.
+tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
+# => [[0 0 0 0]]
+
+# Select two rows, two segments.
+tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
+# => [[ 1 2 3 4]
+# [-1 -2 -3 -4]]
+
+# Select all rows, two segments.
+tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
+# => [[0 0 0 0]
+# [5 6 7 8]]
+
+# Which is equivalent to:
+tf.segment_sum(c, tf.constant([0, 0, 1]))
+```
+END
+}
+op {
+ graph_op_name: "SparseSlice"
+ endpoint {
+ name: "SparseSlice"
+ }
+ summary: "Slice a `SparseTensor` based on the `start` and `size`."
+ description: <<END
+For example, if the input is
+
+ input_tensor = shape = [2, 7]
+ [ a d e ]
+ [b c ]
+
+Graphically the output tensors are:
+
+ sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
+ [ a ]
+ [b c ]
+
+ sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
+ [ d e ]
+ [ ]
+END
+}
+op {
+ graph_op_name: "SparseSoftmax"
+ endpoint {
+ name: "SparseSoftmax"
+ }
+ summary: "Applies softmax to a batched N-D `SparseTensor`."
+ description: <<END
+The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
+(where `N >= 2`), and with indices sorted in the canonical lexicographic order.
+
+This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
+logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
+zero elements do not participate*. Specifically, the algorithm is equivalent
+to the following:
+
+ (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
+ with shape `[B, C]`, along the size-C dimension;
+ (2) Masks out the original implicitly-zero locations;
+ (3) Renormalizes the remaining elements.
+
+Hence, the `SparseTensor` result has exactly the same non-zero indices and
+shape.
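+
+A numpy sketch of these three steps for a single `[B, C]` submatrix
+(hypothetical names; not the kernel implementation):
+
+```python
+import numpy as np
+
+dense = np.array([[1.0, 0.0],   # 0.0 entries stand for implicit zeros
+                  [2.0, 3.0]])
+mask = dense != 0               # original non-zero locations
+
+exp = np.exp(dense - dense.max(axis=-1, keepdims=True))
+soft = exp / exp.sum(axis=-1, keepdims=True)     # (1) dense softmax
+soft = soft * mask                               # (2) mask implicit zeros
+soft = soft / soft.sum(axis=-1, keepdims=True)   # (3) renormalize
+# Row 0 has a single non-zero entry, so that entry becomes 1.0.
+```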
+END
+}
+op {
+ graph_op_name: "SparseSoftmaxCrossEntropyWithLogits"
+ endpoint {
+ name: "SparseSoftmaxCrossEntropyWithLogits"
+ }
+ summary: "Computes softmax cross entropy cost and gradients to backpropagate."
+ description: <<END
+Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
+a matrix of label probabilities, but rather a single label per row
+of features. This label is considered to have probability 1.0 for the
+given row.
+
+Inputs are the logits, not probabilities.
+END
+}
+op {
+ graph_op_name: "SparseSparseMaximum"
+ endpoint {
+ name: "SparseSparseMaximum"
+ }
+ summary: "Returns the element-wise max of two SparseTensors."
+ description: <<END
+Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
+END
+}
+op {
+ graph_op_name: "SparseSparseMinimum"
+ endpoint {
+ name: "SparseSparseMinimum"
+ }
+ summary: "Returns the element-wise min of two SparseTensors."
+ description: <<END
+Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
+END
+}
+op {
+ graph_op_name: "SparseSplit"
+ endpoint {
+ name: "SparseSplit"
+ }
+ summary: "Split a `SparseTensor` into `num_split` tensors along one dimension."
+ description: <<END
+If `shape[split_dim]` is not an integer multiple of `num_split`, slices
+`[0 : shape[split_dim] % num_split]` get one extra dimension.
+For example, if `split_dim = 1` and `num_split = 2` and the input is
+
+ input_tensor = shape = [2, 7]
+ [ a d e ]
+ [b c ]
+
+Graphically the output tensors are:
+
+ output_tensor[0] = shape = [2, 4]
+ [ a ]
+ [b c ]
+
+ output_tensor[1] = shape = [2, 3]
+ [ d e ]
+ [ ]
+END
+}
+op {
+ graph_op_name: "SparseTensorDenseAdd"
+ endpoint {
+ name: "SparseTensorDenseAdd"
+ }
+ summary: "Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`."
+ description: <<END
+This Op does not require `a_indices` be sorted in standard lexicographic order.
+END
+}
+op {
+ graph_op_name: "SparseTensorDenseMatMul"
+ endpoint {
+ name: "SparseTensorDenseMatMul"
+ }
+ summary: "Multiply SparseTensor (of rank 2) \"A\" by dense matrix \"B\"."
+ description: <<END
+No validity checking is performed on the indices of A. However, the following
+input format is recommended for optimal behavior:
+
+if adjoint_a == false:
+ A should be sorted in lexicographically increasing order. Use SparseReorder
+ if you're not sure.
+if adjoint_a == true:
+ A should be sorted in order of increasing dimension 1 (i.e., "column major"
+ order instead of "row major" order).
+END
+}
+op {
+ graph_op_name: "SparseTensorSliceDataset"
+ endpoint {
+ name: "SparseTensorSliceDataset"
+ }
+ summary: "Creates a dataset that splits a SparseTensor into elements row-wise."
+}
+op {
+ graph_op_name: "SparseToDense"
+ endpoint {
+ name: "SparseToDense"
+ }
+ summary: "Converts a sparse representation into a dense tensor."
+ description: <<END
+Builds an array `dense` with shape `output_shape` such that
+
+```
+# If sparse_indices is scalar
+dense[i] = (i == sparse_indices ? sparse_values : default_value)
+
+# If sparse_indices is a vector, then for each i
+dense[sparse_indices[i]] = sparse_values[i]
+
+# If sparse_indices is an n by d matrix, then for each i in [0, n)
+dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
+```
+
+All other values in `dense` are set to `default_value`. If `sparse_values` is a
+scalar, all sparse indices are set to this single value.
+
+Indices should be sorted in lexicographic order, and indices must not
+contain any repeats. If `validate_indices` is true, these properties
+are checked during execution.
+END
+}
+op {
+ graph_op_name: "SparseToSparseSetOperation"
+ endpoint {
+ name: "SparseToSparseSetOperation"
+ }
+ summary: "Applies set operation along last dimension of 2 `SparseTensor` inputs."
+ description: <<END
+See SetOperationOp::SetOperationFromContext for values of `set_operation`.
+
+If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
+order and range of `set1` and `set2` indices.
+
+Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
+and `set1_shape`. For `set1` ranked `n`, the first `n-1` dimensions must be the same
+as `set2`. Dimension `n` contains values in a set, duplicates are allowed but
+ignored.
+
+Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
+and `set2_shape`. For `set2` ranked `n`, the first `n-1` dimensions must be the same
+as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
+ignored.
+
+If `validate_indices` is `True`, this op validates the order and range of `set1`
+and `set2` indices.
+
+Output `result` is a `SparseTensor` represented by `result_indices`,
+`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
+has rank `n` and the same first `n-1` dimensions as `set1` and `set2`. The `nth`
+dimension contains the result of `set_operation` applied to the corresponding
+`[0...n-1]` dimension of `set`.
+END
+}
+op {
+ graph_op_name: "Split"
+ endpoint {
+ name: "Split"
+ }
+ summary: "Splits a tensor into `num_split` tensors along one dimension."
+}
+op {
+ graph_op_name: "SplitV"
+ endpoint {
+ name: "SplitV"
+ }
+ summary: "Splits a tensor into `num_split` tensors along one dimension."
+}
+op {
+ graph_op_name: "SqlDataset"
+ endpoint {
+ name: "SqlDataset"
+ }
+ summary: "Creates a dataset that executes a SQL query and emits rows of the result set."
+}
+op {
+ graph_op_name: "Sqrt"
+ endpoint {
+ name: "Sqrt"
+ }
+ summary: "Computes square root of x element-wise."
+ description: <<END
+I.e., \\(y = \sqrt{x} = x^{1/2}\\).
+END
+}
+op {
+ graph_op_name: "SqrtGrad"
+ endpoint {
+ name: "SqrtGrad"
+ }
+ summary: "Computes the gradient for the sqrt of `x` wrt its input."
+ description: <<END
+Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
+is the corresponding input gradient.
+END
+}
+op {
+ graph_op_name: "Square"
+ endpoint {
+ name: "Square"
+ }
+ summary: "Computes square of x element-wise."
+ description: <<END
+I.e., \\(y = x * x = x^2\\).
+END
+}
+op {
+ graph_op_name: "SquaredDifference"
+ endpoint {
+ name: "SquaredDifference"
+ }
+ summary: "Returns (x - y)(x - y) element-wise."
+ description: <<END
+*NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "Squeeze"
+ endpoint {
+ name: "Squeeze"
+ }
+ summary: "Removes dimensions of size 1 from the shape of a tensor."
+ description: <<END
+Given a tensor `input`, this operation returns a tensor of the same type with
+all dimensions of size 1 removed. If you don't want to remove all size 1
+dimensions, you can remove specific size 1 dimensions by specifying
+`squeeze_dims`.
+
+For example:
+
+```
+# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
+shape(squeeze(t)) ==> [2, 3]
+```
+
+Or, to remove specific size 1 dimensions:
+
+```
+# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
+shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
+```
+END
+}
+op {
+ graph_op_name: "Stack"
+ endpoint {
+ name: "Stack"
+ }
+ summary: "Deprecated, use StackV2."
+}
+op {
+ graph_op_name: "StackClose"
+ endpoint {
+ name: "StackClose"
+ }
+ summary: "Deprecated, use StackCloseV2."
+}
+op {
+ graph_op_name: "StackCloseV2"
+ endpoint {
+ name: "StackCloseV2"
+ }
+ summary: "Delete the stack from its resource container."
+}
+op {
+ graph_op_name: "StackPop"
+ endpoint {
+ name: "StackPop"
+ }
+ summary: "Deprecated, use StackPopV2."
+}
+op {
+ graph_op_name: "StackPopV2"
+ endpoint {
+ name: "StackPopV2"
+ }
+ summary: "Pop the element at the top of the stack."
+}
+op {
+ graph_op_name: "StackPush"
+ endpoint {
+ name: "StackPush"
+ }
+ summary: "Deprecated, use StackPushV2."
+}
+op {
+ graph_op_name: "StackPushV2"
+ endpoint {
+ name: "StackPushV2"
+ }
+ summary: "Push an element onto the stack."
+}
+op {
+ graph_op_name: "StackV2"
+ endpoint {
+ name: "StackV2"
+ }
+ summary: "A stack that produces elements in first-in last-out order."
+}
+op {
+ graph_op_name: "Stage"
+ endpoint {
+ name: "Stage"
+ }
+ summary: "Stage values similar to a lightweight Enqueue."
+ description: <<END
+The basic functionality of this Op is similar to a queue with many
+fewer capabilities and options. This Op is optimized for performance.
+END
+}
+op {
+ graph_op_name: "StageClear"
+ endpoint {
+ name: "StageClear"
+ }
+ summary: "Op removes all elements in the underlying container."
+}
+op {
+ graph_op_name: "StagePeek"
+ endpoint {
+ name: "StagePeek"
+ }
+  summary: "Op peeks at the values at the specified index."
+  description: <<END
+If the underlying container does not contain sufficient elements,
+this op will block until it does. This Op is optimized for
+performance.
+END
+}
+op {
+ graph_op_name: "StageSize"
+ endpoint {
+ name: "StageSize"
+ }
+ summary: "Op returns the number of elements in the underlying container."
+}
+op {
+ graph_op_name: "StatelessRandomNormal"
+ endpoint {
+ name: "StatelessRandomNormal"
+ }
+ summary: "Outputs deterministic pseudorandom values from a normal distribution."
+ description: <<END
+The generated values will have mean 0 and standard deviation 1.
+
+The outputs are a deterministic function of `shape` and `seed`.
+END
+}
+op {
+ graph_op_name: "StatelessRandomUniform"
+ endpoint {
+ name: "StatelessRandomUniform"
+ }
+  summary: "Outputs deterministic pseudorandom values from a uniform distribution."
+ description: <<END
+The generated values follow a uniform distribution in the range `[0, 1)`. The
+lower bound 0 is included in the range, while the upper bound 1 is excluded.
+
+The outputs are a deterministic function of `shape` and `seed`.
+END
+}
+op {
+ graph_op_name: "StatelessTruncatedNormal"
+ endpoint {
+ name: "StatelessTruncatedNormal"
+ }
+ summary: "Outputs deterministic pseudorandom values from a truncated normal distribution."
+ description: <<END
+The generated values follow a normal distribution with mean 0 and standard
+deviation 1, except that values whose magnitude is more than 2 standard
+deviations from the mean are dropped and re-picked.
+
+The outputs are a deterministic function of `shape` and `seed`.
+END
+}
+op {
+ graph_op_name: "StopGradient"
+ endpoint {
+ name: "StopGradient"
+ }
+ summary: "Stops gradient computation."
+ description: <<END
+When executed in a graph, this op outputs its input tensor as-is.
+
+When building ops to compute gradients, this op prevents the contribution of
+its inputs from being taken into account. Normally, the gradient generator adds
+ops to a graph to compute the derivatives of a specified 'loss' by recursively
+finding the inputs that contributed to its computation. If you insert this op
+in the graph, its inputs are masked from the gradient generator. They are not
+taken into account for computing gradients.
+
+This is useful any time you want to compute a value with TensorFlow but need
+to pretend that the value was a constant. Some examples include:
+
+* The *EM* algorithm where the *M-step* should not involve backpropagation
+ through the output of the *E-step*.
+* Contrastive divergence training of Boltzmann machines where, when
+ differentiating the energy function, the training must not backpropagate
+ through the graph that generated the samples from the model.
+* Adversarial training, where no backprop should happen through the adversarial
+ example generation process.
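+
+A minimal usage sketch (the variable names are hypothetical):
+
+```python
+x = tf.constant(3.0)
+y = tf.square(x)
+z = tf.stop_gradient(y) * x   # y is treated as a constant when differentiating
+grads = tf.gradients(z, [x])  # dz/dx == y == 9.0
+```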
+END
+}
+op {
+ graph_op_name: "StridedSlice"
+ endpoint {
+ name: "StridedSlice"
+ }
+ summary: "Return a strided slice from `input`."
+ description: <<END
+Note, most python users will want to use the Python `Tensor.__getitem__`
+or `Variable.__getitem__` rather than this op directly.
+
+The goal of this op is to produce a new tensor with a subset of
+the elements from the `n` dimensional `input` tensor. The subset is chosen using
+a sequence of `m` sparse range specifications encoded into the arguments
+of this function. Note, in some cases
+`m` could be equal to `n`, but this need not be the case. Each
+range specification entry can be one of the following:
+
+- An ellipsis (...). Ellipses are used to imply zero or more
+ dimensions of full-dimension selection and are produced using
+ `ellipsis_mask`. For example, `foo[...]` is the identity slice.
+
+- A new axis. This is used to insert a new shape=1 dimension and is
+ produced using `new_axis_mask`. For example, `foo[:, ...]` where
+ `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
+
+
+- A range `begin:end:stride`. This is used to specify how much to choose from
+ a given dimension. `stride` can be any integer but 0. `begin` is an integer
+ which represents the index of the first value to select while `end` represents
+ the index of the last value to select. The number of values selected in each
+ dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
+ `begin` and `end` can be negative where `-1` is the last element, `-2` is
+ the second to last. `begin_mask` controls whether to replace the explicitly
+ given `begin` with an implicit effective value of `0` if `stride > 0` and
+ `-1` if `stride < 0`. `end_mask` is analogous but produces the number
+ required to create the largest open interval. For example, given a shape
+ `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
+ not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
+ and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
+ first dimension of a tensor while dropping the last two (in the original
+ order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.
+
+- A single index. This is used to keep only elements that have a given
+  index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
+ shape `(6,)` tensor. This is encoded in `begin` and `end` and
+ `shrink_axis_mask`.
+
+Each conceptual range specification is encoded in the op's argument. This
+encoding is best understood by considering a non-trivial example. In
+particular,
+`foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
+
+```
+begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
+end = [2, 4, x, x, -3, x]
+strides = [1, 1, x, x, -1, 1]
+begin_mask = 1<<4 | 1 << 5 = 48
+end_mask = 1<<5 = 32
+ellipsis_mask = 1<<3 = 8
+new_axis_mask = 1<<2 = 4
+shrink_axis_mask = 1<<0 = 1
+```
+
+In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
+the slice becomes (2, 1, 5, 5, 2, 5).
+Let us walk step by step through each argument specification.
+
+1. The first argument in the example slice is turned into `begin = 1` and
+`end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
+also set the appropriate bit in `shrink_axis_mask`.
+
+2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
+zero bits contributed.
+
+3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1
+in the final shape. Dummy values are contributed to begin,
+end and stride, while the new_axis_mask bit is set.
+
+4. `...` grabs the full ranges from as many dimensions as needed to
+fully specify a slice for every dimension of the input shape.
+
+5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
+with a dimension that has shape `s` is converted to a positive index
+`s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
+is done internally so begin, end and strides receive x, -3, and -1.
+The appropriate begin_mask bit is set to indicate the start range is the
+full range (ignoring the x).
+
+6. `:` indicates that the entire contents of the corresponding dimension
+is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
+receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
+`end_mask` are also set.
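+
+For most users, the equivalent Python slicing expression is simpler than
+setting the masks by hand (a hedged sketch; `foo` is hypothetical):
+
+```python
+foo = tf.ones([5, 5, 5, 5, 5, 5])
+# Same spec as the worked example above; yields a (2, 1, 5, 5, 2, 5) tensor.
+sliced = foo[1, 2:4, None, ..., :-3:-1, :]
+```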
+
+*Requirements*:
+ `0 != strides[i] for i in [0, m)`
+ `ellipsis_mask must be a power of two (only one ellipsis)`
+END
+}
+op {
+ graph_op_name: "StridedSliceAssign"
+ endpoint {
+ name: "StridedSliceAssign"
+ }
+ summary: "Assign `value` to the sliced l-value reference of `ref`."
+ description: <<END
+The values of `value` are assigned to the positions in the variable
+`ref` that are selected by the slice parameters. The slice parameters
+`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
+
+NOTE this op currently does not support broadcasting and so `value`'s
+shape must be exactly the shape produced by the slice of `ref`.
+END
+}
+op {
+ graph_op_name: "StridedSliceGrad"
+ endpoint {
+ name: "StridedSliceGrad"
+ }
+ summary: "Returns the gradient of `StridedSlice`."
+ description: <<END
+Since `StridedSlice` cuts out pieces of its `input` which is size
+`shape`, its gradient will have the same shape (which is passed here
+as `shape`). The gradient will be zero in any element that the slice
+does not select.
+
+Arguments are the same as `StridedSlice` with the exception that
+`dy` is the input gradient to be propagated and `shape` is the
+shape of `StridedSlice`'s `input`.
+END
+}
+op {
+ graph_op_name: "StringJoin"
+ endpoint {
+ name: "StringJoin"
+ }
+  summary: "Joins the strings in the given list of string tensors into one tensor."
+  description: <<END
+The strings are joined with the given separator (default is an empty separator).
+END
+}
+op {
+ graph_op_name: "StringSplit"
+ endpoint {
+ name: "StringSplit"
+ }
+ summary: "Split elements of `input` based on `delimiter` into a `SparseTensor`."
+ description: <<END
+Let N be the size of source (typically N will be the batch size). Split each
+element of `input` based on `delimiter` and return a `SparseTensor`
+containing the split tokens. Empty tokens are ignored.
+
+`delimiter` can be empty, or a string of split characters. If `delimiter` is an
+ empty string, each element of `input` is split into individual single-byte
+ character strings, including splitting of UTF-8 multibyte sequences. Otherwise
+ every character of `delimiter` is a potential split point.
+
+For example:
+ N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
+ will be
+
+ indices = [0, 0;
+ 0, 1;
+ 1, 0;
+ 1, 1;
+ 1, 2]
+ shape = [2, 3]
+ values = ['hello', 'world', 'a', 'b', 'c']
+END
+}
+op {
+ graph_op_name: "StringToHashBucket"
+ endpoint {
+ name: "StringToHashBucket"
+ }
+  summary: "Converts each string in the input Tensor to its hash modulo a number of buckets."
+ description: <<END
+The hash function is deterministic on the content of the string within the
+process.
+
+Note that the hash function may change from time to time.
+This functionality will be deprecated and it's recommended to use
+`tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
+END
+}
+op {
+ graph_op_name: "StringToHashBucketFast"
+ endpoint {
+ name: "StringToHashBucketFast"
+ }
+  summary: "Converts each string in the input Tensor to its hash modulo a number of buckets."
+ description: <<END
+The hash function is deterministic on the content of the string within the
+process and will never change. However, it is not suitable for cryptography.
+This function may be used when CPU time is scarce and inputs are trusted or
+unimportant. There is a risk of adversaries constructing inputs that all hash
+to the same bucket. To prevent this problem, use a strong hash function with
+`tf.string_to_hash_bucket_strong`.
+END
+}
+op {
+ graph_op_name: "StringToHashBucketStrong"
+ endpoint {
+ name: "StringToHashBucketStrong"
+ }
+  summary: "Converts each string in the input Tensor to its hash modulo a number of buckets."
+ description: <<END
+The hash function is deterministic on the content of the string within the
+process. The hash function is a keyed hash function, where attribute `key`
+defines the key of the hash function. `key` is an array of 2 elements.
+
+A strong hash is important when inputs may be malicious, e.g. URLs with
+additional components. Adversaries could try to make their inputs hash to the
+same bucket for a denial-of-service attack or to skew the results. A strong
+hash prevents this by making it difficult, if not infeasible, to compute inputs
+that hash to the same bucket. This comes at a cost of roughly 4x higher compute
+time than `tf.string_to_hash_bucket_fast`.
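+
+A usage sketch, assuming the Python wrapper `tf.string_to_hash_bucket_strong`
+(values below are illustrative only):
+
+```python
+buckets = tf.string_to_hash_bucket_strong(
+    ["Hello", "TensorFlow"],
+    num_buckets=10,
+    key=[1234, 5678])   # the 2-element key of the keyed hash function
+```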
+END
+}
+op {
+ graph_op_name: "StringToNumber"
+ endpoint {
+ name: "StringToNumber"
+ }
+ summary: "Converts each string in the input Tensor to the specified numeric type."
+ description: <<END
+(Note that int32 overflow results in an error while float overflow
+results in a rounded value.)
+END
+}
+op {
+ graph_op_name: "Sub"
+ endpoint {
+ name: "Sub"
+ }
+ summary: "Returns x - y element-wise."
+ description: <<END
+*NOTE*: `Sub` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "Substr"
+ endpoint {
+ name: "Substr"
+ }
+ summary: "Return substrings from `Tensor` of strings."
+ description: <<END
+For each string in the input `Tensor`, creates a substring starting at index
+`pos` with a total length of `len`.
+
+If `len` defines a substring that would extend beyond the length of the input
+string, then as many characters as possible are used.
+
+If `pos` is negative or specifies a character index larger than any of the input
+strings, then an `InvalidArgumentError` is thrown.
+
+`pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on
+Op creation.
+
+*NOTE*: `Substr` supports broadcasting up to two dimensions. More about
+broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+
+---
+
+Examples
+
+Using scalar `pos` and `len`:
+
+```python
+input = [b'Hello', b'World']
+position = 1
+length = 3
+
+output = [b'ell', b'orl']
+```
+
+Using `pos` and `len` with same shape as `input`:
+
+```python
+input = [[b'ten', b'eleven', b'twelve'],
+ [b'thirteen', b'fourteen', b'fifteen'],
+ [b'sixteen', b'seventeen', b'eighteen']]
+position = [[1, 2, 3],
+ [1, 2, 3],
+ [1, 2, 3]]
+length = [[2, 3, 4],
+ [4, 3, 2],
+ [5, 5, 5]]
+
+output = [[b'en', b'eve', b'lve'],
+ [b'hirt', b'urt', b'te'],
+ [b'ixtee', b'vente', b'hteen']]
+```
+
+Broadcasting `pos` and `len` onto `input`:
+
+```
+input = [[b'ten', b'eleven', b'twelve'],
+ [b'thirteen', b'fourteen', b'fifteen'],
+ [b'sixteen', b'seventeen', b'eighteen'],
+ [b'nineteen', b'twenty', b'twentyone']]
+position = [1, 2, 3]
+length = [1, 2, 3]
+
+output = [[b'e', b'ev', b'lve'],
+ [b'h', b'ur', b'tee'],
+ [b'i', b've', b'hte'],
+ [b'i', b'en', b'nty']]
+```
+
+Broadcasting `input` onto `pos` and `len`:
+
+```
+input = b'thirteen'
+position = [1, 5, 7]
+length = [3, 2, 1]
+
+output = [b'hir', b'ee', b'n']
+```
+END
+}
+op {
+ graph_op_name: "Sum"
+ endpoint {
+ name: "Sum"
+ }
+ summary: "Computes the sum of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
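+
+For example, assuming the Python wrapper `tf.reduce_sum` (a hedged usage
+sketch):
+
+```python
+x = tf.constant([[1, 1, 1], [1, 1, 1]])
+tf.reduce_sum(x)                     # => 6
+tf.reduce_sum(x, 0)                  # => [2, 2, 2]
+tf.reduce_sum(x, 1)                  # => [3, 3, 3]
+tf.reduce_sum(x, 1, keep_dims=True)  # => [[3], [3]]
+tf.reduce_sum(x, [0, 1])             # => 6
+```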
+END
+}
+op {
+ graph_op_name: "Svd"
+ endpoint {
+ name: "Svd"
+ }
+ summary: "Computes the singular value decompositions of one or more matrices."
+ description: <<END
+Computes the SVD of each inner matrix in `input` such that
+`input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
+
+```python
+# a is a tensor containing a batch of matrices.
+# s is a tensor of singular values for each matrix.
+# u is the tensor containing of left singular vectors for each matrix.
+# v is the tensor containing of right singular vectors for each matrix.
+s, u, v = svd(a)
+s, _, _ = svd(a, compute_uv=False)
+```
+END
+}
+op {
+ graph_op_name: "Switch"
+ endpoint {
+ name: "Switch"
+ }
+ summary: "Forwards `data` to the output port determined by `pred`."
+ description: <<END
+If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
+the data goes to `output_false`.
+
+See also `RefSwitch` and `Merge`.
+END
+}
+op {
+ graph_op_name: "SymbolicGradient"
+ endpoint {
+ name: "SymbolicGradient"
+ }
+ summary: "Computes the gradient function for function f via backpropagation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_T.pbtxt b/tensorflow/core/api_def/base_api/api_def_T.pbtxt
new file mode 100644
index 0000000000..8d1cbbcc06
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_T.pbtxt
@@ -0,0 +1,619 @@
+op {
+ graph_op_name: "TFRecordDataset"
+ endpoint {
+ name: "TFRecordDataset"
+ }
+ summary: "Creates a dataset that emits the records from one or more TFRecord files."
+}
+op {
+ graph_op_name: "TFRecordReader"
+ endpoint {
+ name: "TFRecordReader"
+ }
+ summary: "A Reader that outputs the records from a TensorFlow Records file."
+}
+op {
+ graph_op_name: "TFRecordReaderV2"
+ endpoint {
+ name: "TFRecordReaderV2"
+ }
+ summary: "A Reader that outputs the records from a TensorFlow Records file."
+}
+op {
+ graph_op_name: "TakeDataset"
+ endpoint {
+ name: "TakeDataset"
+ }
+ summary: "Creates a dataset that contains `count` elements from the `input_dataset`."
+}
+op {
+ graph_op_name: "TakeManySparseFromTensorsMap"
+ endpoint {
+ name: "TakeManySparseFromTensorsMap"
+ }
+ summary: "Read `SparseTensors` from a `SparseTensorsMap` and concatenate them."
+ description: <<END
+The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
+`N` is the minibatch size and the rows correspond to the output handles of
+`AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the
+original `SparseTensor` objects that went into the given input ops must all
+match. When the final `SparseTensor` is created, it has rank one
+higher than the ranks of the incoming `SparseTensor` objects
+(they have been concatenated along a new row dimension on the left).
+
+The output `SparseTensor` object's shape values for all dimensions but the
+first are the max across the input `SparseTensor` objects' shape values
+for the corresponding dimensions. Its first shape value is `N`, the minibatch
+size.
+
+The input `SparseTensor` objects' indices are assumed ordered in
+standard lexicographic order. If this is not the case, after this
+step run `SparseReorder` to restore index ordering.
+
+For example, if the handles represent an input, which is a `[2, 3]` matrix
+representing two original `SparseTensor` objects:
+
+```
+ index = [ 0]
+ [10]
+ [20]
+ values = [1, 2, 3]
+ shape = [50]
+```
+
+and
+
+```
+ index = [ 2]
+ [10]
+ values = [4, 5]
+ shape = [30]
+```
+
+then the final `SparseTensor` will be:
+
+```
+ index = [0 0]
+ [0 10]
+ [0 20]
+ [1 2]
+ [1 10]
+ values = [1, 2, 3, 4, 5]
+ shape = [2 50]
+```
+END
+}
+op {
+ graph_op_name: "Tan"
+ endpoint {
+ name: "Tan"
+ }
+ summary: "Computes tan of x element-wise."
+}
+op {
+ graph_op_name: "Tanh"
+ endpoint {
+ name: "Tanh"
+ }
+ summary: "Computes hyperbolic tangent of `x` element-wise."
+}
+op {
+ graph_op_name: "TanhGrad"
+ endpoint {
+ name: "TanhGrad"
+ }
+ summary: "Computes the gradient for the tanh of `x` wrt its input."
+ description: <<END
+Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
+is the corresponding input gradient.
+END
+}
+op {
+ graph_op_name: "TemporaryVariable"
+ endpoint {
+ name: "TemporaryVariable"
+ }
+ summary: "Returns a tensor that may be mutated, but only persists within a single step."
+ description: <<END
+This is an experimental op for internal use only and it is possible to use this
+op in unsafe ways. DO NOT USE unless you fully understand the risks.
+
+It is the caller's responsibility to ensure that 'ref' is eventually passed to a
+matching 'DestroyTemporaryVariable' op after all other uses have completed.
+
+Outputs a ref to the tensor state so it may be read or modified.
+
+ E.g.
+ var = state_ops._temporary_variable([1, 2], types.float_)
+ var_name = var.op.name
+ var = state_ops.assign(var, [[4.0, 5.0]])
+ var = state_ops.assign_add(var, [[6.0, 7.0]])
+ final = state_ops._destroy_temporary_variable(var, var_name=var_name)
+END
+}
+op {
+ graph_op_name: "TensorArray"
+ endpoint {
+ name: "TensorArray"
+ }
+}
+op {
+ graph_op_name: "TensorArrayClose"
+ endpoint {
+ name: "TensorArrayClose"
+ }
+}
+op {
+ graph_op_name: "TensorArrayCloseV2"
+ endpoint {
+ name: "TensorArrayCloseV2"
+ }
+ summary: "Deprecated. Use TensorArrayCloseV3"
+}
+op {
+ graph_op_name: "TensorArrayCloseV3"
+ endpoint {
+ name: "TensorArrayCloseV3"
+ }
+ summary: "Delete the TensorArray from its resource container."
+ description: <<END
+This enables the user to close and release the resource in the middle
+of a step/run.
+END
+}
+op {
+ graph_op_name: "TensorArrayConcat"
+ endpoint {
+ name: "TensorArrayConcat"
+ }
+}
+op {
+ graph_op_name: "TensorArrayConcatV2"
+ endpoint {
+ name: "TensorArrayConcatV2"
+ }
+ summary: "Deprecated. Use TensorArrayConcatV3"
+}
+op {
+ graph_op_name: "TensorArrayConcatV3"
+ endpoint {
+ name: "TensorArrayConcatV3"
+ }
+ summary: "Concat the elements from the TensorArray into value `value`."
+ description: <<END
+Takes `T` elements of shapes
+
+ ```
+ (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
+ ```
+
+and concatenates them into a Tensor of shape:
+
+ ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
+
+All elements must have the same shape (except for the first dimension).
+END
+}
+op {
+ graph_op_name: "TensorArrayGather"
+ endpoint {
+ name: "TensorArrayGather"
+ }
+}
+op {
+ graph_op_name: "TensorArrayGatherV2"
+ endpoint {
+ name: "TensorArrayGatherV2"
+ }
+ summary: "Deprecated. Use TensorArrayGatherV3"
+}
+op {
+ graph_op_name: "TensorArrayGatherV3"
+ endpoint {
+ name: "TensorArrayGatherV3"
+ }
+ summary: "Gather specific elements from the TensorArray into output `value`."
+ description: <<END
+All elements selected by `indices` must have the same shape.
+END
+}
+op {
+ graph_op_name: "TensorArrayGrad"
+ endpoint {
+ name: "TensorArrayGrad"
+ }
+}
+op {
+ graph_op_name: "TensorArrayGradV2"
+ endpoint {
+ name: "TensorArrayGradV2"
+ }
+ summary: "Deprecated. Use TensorArrayGradV3"
+}
+op {
+ graph_op_name: "TensorArrayGradV3"
+ endpoint {
+ name: "TensorArrayGradV3"
+ }
+ summary: "Creates a TensorArray for storing the gradients of values in the given handle."
+ description: <<END
+If the given TensorArray gradient already exists, returns a reference to it.
+
+Locks the size of the original TensorArray by disabling its dynamic size flag.
+
+**A note about the input flow_in:**
+
+The handle flow_in forces the execution of the gradient lookup to occur
+only after certain other operations have occurred. For example, when
+the forward TensorArray is dynamically sized, writes to this TensorArray
+may resize the object. The gradient TensorArray is statically sized based
+on the size of the forward TensorArray when this operation executes.
+Furthermore, the size of the forward TensorArray is frozen by this call.
+As a result, the flow is used to ensure that the call to generate the gradient
+TensorArray only happens after all writes are executed.
+
+In the case of dynamically sized TensorArrays, gradient computation should
+only be performed on read operations that have themselves been chained via
+flow to occur only after all writes have executed. That way the final size
+of the forward TensorArray is known when this operation is called.
+
+**A note about the source attribute:**
+
+TensorArray gradient calls use an accumulator TensorArray object. If
+multiple gradients are calculated and run in the same session, the multiple
+gradient nodes may accidentally flow through the same accumulator TensorArray.
+This double counts and generally breaks the TensorArray gradient flow.
+
+The solution is to identify which gradient call this particular
+TensorArray gradient is being called in. This is performed by identifying
+a unique string (e.g. "gradients", "gradients_1", ...) from the input
+gradient Tensor's name. This string is used as a suffix when creating
+the TensorArray gradient object here (the attribute `source`).
+
+The attribute `source` is added as a suffix to the forward TensorArray's
+name when performing the creation / lookup, so that each separate gradient
+calculation gets its own TensorArray accumulator.
+END
+}
+op {
+ graph_op_name: "TensorArrayPack"
+ endpoint {
+ name: "TensorArrayPack"
+ }
+}
+op {
+ graph_op_name: "TensorArrayRead"
+ endpoint {
+ name: "TensorArrayRead"
+ }
+}
+op {
+ graph_op_name: "TensorArrayReadV2"
+ endpoint {
+ name: "TensorArrayReadV2"
+ }
+ summary: "Deprecated. Use TensorArrayReadV3"
+}
+op {
+ graph_op_name: "TensorArrayReadV3"
+ endpoint {
+ name: "TensorArrayReadV3"
+ }
+ summary: "Read an element from the TensorArray into output `value`."
+}
+op {
+ graph_op_name: "TensorArrayScatter"
+ endpoint {
+ name: "TensorArrayScatter"
+ }
+}
+op {
+ graph_op_name: "TensorArrayScatterV2"
+ endpoint {
+ name: "TensorArrayScatterV2"
+ }
+ summary: "Deprecated. Use TensorArrayScatterV3"
+}
+op {
+ graph_op_name: "TensorArrayScatterV3"
+ endpoint {
+ name: "TensorArrayScatterV3"
+ }
+ summary: "Scatter the data from the input value into specific TensorArray elements."
+ description: <<END
+`indices` must be a vector, its length must match the first dim of `value`.
+END
+}
+op {
+ graph_op_name: "TensorArraySize"
+ endpoint {
+ name: "TensorArraySize"
+ }
+}
+op {
+ graph_op_name: "TensorArraySizeV2"
+ endpoint {
+ name: "TensorArraySizeV2"
+ }
+ summary: "Deprecated. Use TensorArraySizeV3"
+}
+op {
+ graph_op_name: "TensorArraySizeV3"
+ endpoint {
+ name: "TensorArraySizeV3"
+ }
+ summary: "Get the current size of the TensorArray."
+}
+op {
+ graph_op_name: "TensorArraySplit"
+ endpoint {
+ name: "TensorArraySplit"
+ }
+}
+op {
+ graph_op_name: "TensorArraySplitV2"
+ endpoint {
+ name: "TensorArraySplitV2"
+ }
+ summary: "Deprecated. Use TensorArraySplitV3"
+}
+op {
+ graph_op_name: "TensorArraySplitV3"
+ endpoint {
+ name: "TensorArraySplitV3"
+ }
+ summary: "Split the data from the input value into TensorArray elements."
+ description: <<END
+Assuming that `lengths` takes on values
+
+ ```(n0, n1, ..., n(T-1))```
+
+and that `value` has shape
+
+ ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
+
+this splits values into a TensorArray with T tensors.
+
+TensorArray index t will be the subtensor of values with starting position
+
+ ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
+
+and having size
+
+ ```nt x d0 x d1 x ...```
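+
+For example (illustrative shapes):
+
+```
+# `value` has shape (7 x 3) and `lengths` is (2, 1, 4)
+# the TensorArray receives 3 tensors of shapes (2 x 3), (1 x 3), (4 x 3)
+```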
+END
+}
+op {
+ graph_op_name: "TensorArrayUnpack"
+ endpoint {
+ name: "TensorArrayUnpack"
+ }
+}
+op {
+ graph_op_name: "TensorArrayV2"
+ endpoint {
+ name: "TensorArrayV2"
+ }
+ summary: "Deprecated. Use TensorArrayV3"
+}
+op {
+ graph_op_name: "TensorArrayV3"
+ endpoint {
+ name: "TensorArrayV3"
+ }
+ summary: "An array of Tensors of given size."
+ description: <<END
+Write data via Write and read via Read or Pack.
+END
+}
+op {
+ graph_op_name: "TensorArrayWrite"
+ endpoint {
+ name: "TensorArrayWrite"
+ }
+}
+op {
+ graph_op_name: "TensorArrayWriteV2"
+ endpoint {
+ name: "TensorArrayWriteV2"
+ }
+ summary: "Deprecated. Use TensorArrayGradV3"
+}
+op {
+ graph_op_name: "TensorArrayWriteV3"
+ endpoint {
+ name: "TensorArrayWriteV3"
+ }
+ summary: "Push an element onto the tensor_array."
+}
+op {
+ graph_op_name: "TensorDataset"
+ endpoint {
+ name: "TensorDataset"
+ }
+ summary: "Creates a dataset that emits `components` as a tuple of tensors once."
+}
+op {
+ graph_op_name: "TensorSliceDataset"
+ endpoint {
+ name: "TensorSliceDataset"
+ }
+ summary: "Creates a dataset that emits each dim-0 slice of `components` once."
+}
+op {
+ graph_op_name: "TensorSummary"
+ endpoint {
+ name: "TensorSummary"
+ }
+ summary: "Outputs a `Summary` protocol buffer with a tensor."
+ description: <<END
+This op is being phased out in favor of TensorSummaryV2, which lets callers pass
+a tag as well as a serialized SummaryMetadata proto string that contains
+plugin-specific data. We will keep this op to maintain backwards compatibility.
+END
+}
+op {
+ graph_op_name: "TensorSummaryV2"
+ endpoint {
+ name: "TensorSummaryV2"
+ }
+ summary: "Outputs a `Summary` protocol buffer with a tensor and per-plugin data."
+}
+op {
+ graph_op_name: "TextLineDataset"
+ endpoint {
+ name: "TextLineDataset"
+ }
+ summary: "Creates a dataset that emits the lines of one or more text files."
+}
+op {
+ graph_op_name: "TextLineReader"
+ endpoint {
+ name: "TextLineReader"
+ }
+ summary: "A Reader that outputs the lines of a file delimited by \'\\n\'."
+}
+op {
+ graph_op_name: "TextLineReaderV2"
+ endpoint {
+ name: "TextLineReaderV2"
+ }
+ summary: "A Reader that outputs the lines of a file delimited by \'\\n\'."
+}
+op {
+ graph_op_name: "ThreadUnsafeUnigramCandidateSampler"
+ endpoint {
+ name: "ThreadUnsafeUnigramCandidateSampler"
+ }
+ summary: "Generates labels for candidate sampling with a learned unigram distribution."
+ description: <<END
+See explanations of candidate sampling and the data formats at
+go/candidate-sampling.
+
+For each batch, this op picks a single set of sampled candidate labels.
+
+The advantages of sampling candidates per-batch are simplicity and the
+possibility of efficient dense matrix multiplication. The disadvantage is that
+the sampled candidates must be chosen independently of the context and of the
+true labels.
+END
+}
+op {
+ graph_op_name: "Tile"
+ endpoint {
+ name: "Tile"
+ }
+ summary: "Constructs a tensor by tiling a given tensor."
+ description: <<END
+This operation creates a new tensor by replicating `input` `multiples` times.
+The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
+and the values of `input` are replicated `multiples[i]` times along the 'i'th
+dimension. For example, tiling `[a b c d]` by `[2]` produces
+`[a b c d a b c d]`.
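+
+For example (illustrative, in the informal notation used by other examples in
+these docs):
+
+```
+# 'input' is [[1, 2], [3, 4]] and `multiples` is [2, 3]
+tile(input, [2, 3]) ==> [[1, 2, 1, 2, 1, 2],
+                         [3, 4, 3, 4, 3, 4],
+                         [1, 2, 1, 2, 1, 2],
+                         [3, 4, 3, 4, 3, 4]]
+```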
+END
+}
+op {
+ graph_op_name: "TileGrad"
+ endpoint {
+ name: "TileGrad"
+ }
+ summary: "Returns the gradient of `Tile`."
+ description: <<END
+Since `Tile` takes an input and repeats the input `multiples` times
+along each dimension, `TileGrad` takes in `multiples` and aggregates
+each repeated tile of `input` into `output`.
+END
+}
+op {
+ graph_op_name: "TopK"
+ endpoint {
+ name: "TopK"
+ }
+ summary: "Finds values and indices of the `k` largest elements for the last dimension."
+ description: <<END
+If the input is a vector (rank-1), finds the `k` largest entries in the vector
+and outputs their values and indices as vectors. Thus `values[j]` is the
+`j`-th largest entry in `input`, and its index is `indices[j]`.
+
+For matrices (resp. higher rank input), computes the top `k` entries in each
+row (resp. vector along the last dimension). Thus,
+
+ values.shape = indices.shape = input.shape[:-1] + [k]
+
+If two elements are equal, the lower-index element appears first.
+
+If `k` varies dynamically, use `TopKV2` below.
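+
+For example (illustrative):
+
+```
+# 'input' is [1, 4, 2, 7] and k = 2
+values  ==> [7, 4]
+indices ==> [3, 1]
+```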
+END
+}
+op {
+ graph_op_name: "TopKV2"
+ endpoint {
+ name: "TopKV2"
+ }
+ summary: "Finds values and indices of the `k` largest elements for the last dimension."
+ description: <<END
+If the input is a vector (rank-1), finds the `k` largest entries in the vector
+and outputs their values and indices as vectors. Thus `values[j]` is the
+`j`-th largest entry in `input`, and its index is `indices[j]`.
+
+For matrices (resp. higher rank input), computes the top `k` entries in each
+row (resp. vector along the last dimension). Thus,
+
+ values.shape = indices.shape = input.shape[:-1] + [k]
+
+If two elements are equal, the lower-index element appears first.
+END
+}
+op {
+ graph_op_name: "Transpose"
+ endpoint {
+ name: "Transpose"
+ }
+ summary: "Shuffle dimensions of x according to a permutation."
+ description: <<END
+The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
+ `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
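+
+For example (illustrative, in the informal notation used by other examples in
+these docs):
+
+```
+# 'x' is [[1, 2, 3],
+#         [4, 5, 6]]
+transpose(x, perm=[1, 0]) ==> [[1, 4],
+                               [2, 5],
+                               [3, 6]]
+```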
+END
+}
+op {
+ graph_op_name: "TruncateDiv"
+ endpoint {
+ name: "TruncateDiv"
+ }
+ summary: "Returns x / y element-wise for integer types."
+ description: <<END
+Truncation designates that negative numbers will round fractional quantities
+toward zero. I.e. -7 / 5 = -1. This matches C semantics but is different
+from Python semantics. See `FloorDiv` for a division function that matches
+Python semantics.
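+
+For example (illustrative):
+
+```
+truncate_div(7, 5)  ==> 1
+truncate_div(-7, 5) ==> -1   # truncation toward zero (C semantics)
+floor_div(-7, 5)    ==> -2   # floor division (Python semantics), for contrast
+```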
+
+*NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "TruncateMod"
+ endpoint {
+ name: "TruncateMod"
+ }
+ summary: "Returns element-wise remainder of division. This emulates C semantics in that"
+ description: <<END
+the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
+y + truncate_mod(x, y) = x`.
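+
+For example (illustrative):
+
+```
+truncate_mod(-7, 5) ==> -2   # truncate(-7 / 5) = -1 and -1 * 5 + -2 = -7
+truncate_mod(7, 5)  ==> 2
+```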
+
+*NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
+op {
+ graph_op_name: "TruncatedNormal"
+ endpoint {
+ name: "TruncatedNormal"
+ }
+ summary: "Outputs random values from a truncated normal distribution."
+ description: <<END
+The generated values follow a normal distribution with mean 0 and standard
+deviation 1, except that values whose magnitude is more than 2 standard
+deviations from the mean are dropped and re-picked.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_U.pbtxt b/tensorflow/core/api_def/base_api/api_def_U.pbtxt
new file mode 100644
index 0000000000..6699efc0e0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_U.pbtxt
@@ -0,0 +1,150 @@
+op {
+ graph_op_name: "UniformCandidateSampler"
+ endpoint {
+ name: "UniformCandidateSampler"
+ }
+ summary: "Generates labels for candidate sampling with a uniform distribution."
+ description: <<END
+See explanations of candidate sampling and the data formats at
+go/candidate-sampling.
+
+For each batch, this op picks a single set of sampled candidate labels.
+
+The advantages of sampling candidates per-batch are simplicity and the
+possibility of efficient dense matrix multiplication. The disadvantage is that
+the sampled candidates must be chosen independently of the context and of the
+true labels.
+END
+}
+op {
+ graph_op_name: "Unique"
+ endpoint {
+ name: "Unique"
+ }
+ summary: "Finds unique elements in a 1-D tensor."
+ description: <<END
+This operation returns a tensor `y` containing all of the unique elements of `x`
+sorted in the same order that they occur in `x`. This operation also returns a
+tensor `idx` the same size as `x` that contains the index of each value of `x`
+in the unique output `y`. In other words:
+
+`y[idx[i]] = x[i] for i in [0, 1,...,len(x) - 1]`
+
+For example:
+
+```
+# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+y, idx = unique(x)
+y ==> [1, 2, 4, 7, 8]
+idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
+```
+END
+}
+op {
+ graph_op_name: "UniqueWithCounts"
+ endpoint {
+ name: "UniqueWithCounts"
+ }
+ summary: "Finds unique elements in a 1-D tensor."
+ description: <<END
+This operation returns a tensor `y` containing all of the unique elements of `x`
+sorted in the same order that they occur in `x`. This operation also returns a
+tensor `idx` the same size as `x` that contains the index of each value of `x`
+in the unique output `y`. Finally, it returns a third tensor `count` that
+contains the count of each element of `y` in `x`. In other words:
+
+`y[idx[i]] = x[i] for i in [0, 1,...,len(x) - 1]`
+
+For example:
+
+```
+# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+y, idx, count = unique_with_counts(x)
+y ==> [1, 2, 4, 7, 8]
+idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
+count ==> [2, 1, 3, 1, 2]
+```
+END
+}
+op {
+ graph_op_name: "Unpack"
+ endpoint {
+ name: "Unpack"
+ }
+ summary: "Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors."
+ description: <<END
+Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
+For example, given a tensor of shape `(A, B, C, D)`:
+
+If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
+ and each tensor in `output` will have shape `(B, C, D)`. (Note that the
+ dimension unpacked along is gone, unlike `split`).
+
+If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
+ and each tensor in `output` will have shape `(A, C, D)`.
+Etc.
+
+This is the opposite of `pack`.
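+
+For example (illustrative):
+
+```
+# 'value' is [[1, 2],
+#             [3, 4],
+#             [5, 6]] with shape (3, 2)
+unpack(value, axis=0) ==> [1, 2], [3, 4], [5, 6]   # three tensors of shape (2,)
+```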
+END
+}
+op {
+ graph_op_name: "UnsortedSegmentMax"
+ endpoint {
+ name: "UnsortedSegmentMax"
+ }
+ summary: "Computes the Max along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+This operator is similar to the [unsorted segment sum operator](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
+Instead of computing the sum over segments, it computes the maximum
+such that:
+
+\\(output_i = \max_j data_j\\) where max is over `j` such
+that `segment_ids[j] == i`.
+
+If the maximum is empty for a given segment ID `i`, it outputs the smallest
+possible value for the specific numeric type,
+`output[i] = numeric_limits<T>::min()`.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
+</div>
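+
+For example (illustrative):
+
+```
+# 'data' is [1, 3, 2, 5], 'segment_ids' is [0, 0, 1, 1], num_segments = 3
+output ==> [3, 5, numeric_limits<T>::min()]   # segment 2 is empty
+```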
+END
+}
+op {
+ graph_op_name: "UnsortedSegmentSum"
+ endpoint {
+ name: "UnsortedSegmentSum"
+ }
+ summary: "Computes the sum along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Computes a tensor such that
+`output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such
+that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids`
+need not be sorted and need not cover all values in the full
+range of valid values.
+
+If the sum is empty for a given segment ID `i`, `output[i] = 0`.
+
+`num_segments` should equal the number of distinct segment IDs.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
+</div>
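+
+For example (illustrative):
+
+```
+# 'data' is [1, 2, 3, 4], 'segment_ids' is [0, 1, 0, 1], num_segments = 2
+output ==> [4, 6]   # 1 + 3 and 2 + 4
+```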
+END
+}
+op {
+ graph_op_name: "Unstage"
+ endpoint {
+ name: "Unstage"
+ }
+ summary: "Op is similar to a lightweight Dequeue."
+ description: <<END
+The basic functionality is similar to dequeue with many fewer
+capabilities and options. This Op is optimized for performance.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_V.pbtxt b/tensorflow/core/api_def/base_api/api_def_V.pbtxt
new file mode 100644
index 0000000000..31cc147900
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_V.pbtxt
@@ -0,0 +1,19 @@
+op {
+ graph_op_name: "Variable"
+ endpoint {
+ name: "Variable"
+ }
+ summary: "Use VariableV2 instead."
+}
+op {
+ graph_op_name: "VariableV2"
+ endpoint {
+ name: "VariableV2"
+ }
+ summary: "Holds state in the form of a tensor that persists across steps."
+ description: <<END
+Outputs a ref to the tensor state so it may be read or modified.
+TODO(zhifengc/mrry): Add a pointer to a more detailed document
+about sharing states in TensorFlow.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_W.pbtxt b/tensorflow/core/api_def/base_api/api_def_W.pbtxt
new file mode 100644
index 0000000000..9120fe334e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_W.pbtxt
@@ -0,0 +1,72 @@
+op {
+ graph_op_name: "Where"
+ endpoint {
+ name: "Where"
+ }
+ summary: "Returns locations of true values in a boolean tensor."
+ description: <<END
+This operation returns the coordinates of true elements in `input`. The
+coordinates are returned in a 2-D tensor where the first dimension (rows)
+represents the number of true elements, and the second dimension (columns)
+represents the coordinates of the true elements. Keep in mind, the shape of
+the output tensor can vary depending on how many true values there are in
+`input`. Indices are output in row-major order.
+
+For example:
+
+```
+# 'input' tensor is [[True, False]
+# [True, False]]
+# 'input' has two true values, so output has two coordinates.
+# 'input' has rank of 2, so coordinates have two indices.
+where(input) ==> [[0, 0],
+ [1, 0]]
+
+# `input` tensor is [[[True, False]
+# [True, False]]
+# [[False, True]
+# [False, True]]
+# [[False, False]
+# [False, True]]]
+# 'input' has 5 true values, so output has 5 coordinates.
+# 'input' has rank of 3, so coordinates have three indices.
+where(input) ==> [[0, 0, 0],
+ [0, 1, 0],
+ [1, 0, 1],
+ [1, 1, 1],
+ [2, 1, 1]]
+```
+END
+}
+op {
+ graph_op_name: "WholeFileReader"
+ endpoint {
+ name: "WholeFileReader"
+ }
+ summary: "A Reader that outputs the entire contents of a file as a value."
+ description: <<END
+To use, enqueue filenames in a Queue. The output of ReaderRead will
+be a filename (key) and the contents of that file (value).
+END
+}
+op {
+ graph_op_name: "WholeFileReaderV2"
+ endpoint {
+ name: "WholeFileReaderV2"
+ }
+ summary: "A Reader that outputs the entire contents of a file as a value."
+ description: <<END
+To use, enqueue filenames in a Queue. The output of ReaderRead will
+be a filename (key) and the contents of that file (value).
+END
+}
+op {
+ graph_op_name: "WriteFile"
+ endpoint {
+ name: "WriteFile"
+ }
+ summary: "Writes contents to the file at input filename. Creates file and recursively"
+ description: <<END
+creates directory if not existing.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Z.pbtxt b/tensorflow/core/api_def/base_api/api_def_Z.pbtxt
new file mode 100644
index 0000000000..f83fef054c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Z.pbtxt
@@ -0,0 +1,27 @@
+op {
+ graph_op_name: "ZerosLike"
+ endpoint {
+ name: "ZerosLike"
+ }
+ summary: "Returns a tensor of zeros with the same shape and type as x."
+}
+op {
+ graph_op_name: "Zeta"
+ endpoint {
+ name: "Zeta"
+ }
+ summary: "Compute the Hurwitz zeta function \\\\(\\zeta(x, q)\\\\)."
+ description: <<END
+The Hurwitz zeta function is defined as:
+
+\\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
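+
+For example (illustrative; with q = 1 this reduces to the Riemann zeta
+function):
+
+```
+zeta(2, 1) ==> 1.6449341   # = pi^2 / 6
+```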
+END
+}
+op {
+ graph_op_name: "ZipDataset"
+ endpoint {
+ name: "ZipDataset"
+ }
+ summary: "Creates a dataset that zips together `input_datasets`."
+}
diff --git a/tensorflow/core/api_def/update_api_def.sh b/tensorflow/core/api_def/update_api_def.sh
new file mode 100755
index 0000000000..07c76e6562
--- /dev/null
+++ b/tensorflow/core/api_def/update_api_def.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+# Script to update tensorflow/core/api_def/base_api/api_def*.pbtxt files.
+
+set -e
+
+current_file="$(readlink -f "$0")"
+current_dir="$(dirname "$current_file")"
+
+bazel build //tensorflow/core:api_test
+bazel-bin/tensorflow/core/api_test \
+ --update_api_def \
+ --api_def_dir="${current_dir}/base_api"
+
diff --git a/tensorflow/core/framework/op.h b/tensorflow/core/framework/op.h
index c765bc915f..f7f1ed2a88 100644
--- a/tensorflow/core/framework/op.h
+++ b/tensorflow/core/framework/op.h
@@ -75,7 +75,8 @@ class OpRegistry : public OpRegistryInterface {
const OpRegistrationData** op_reg_data) const override;
// Fills *ops with all registered OpDefs (except those with names
- // starting with '_' if include_internal == false).
+ // starting with '_' if include_internal == false) sorted in
+ // ascending alphabetical order.
void Export(bool include_internal, OpList* ops) const;
// Returns ASCII-format OpList for all registered OpDefs (except