author    Vijay Vasudevan <vrv@google.com>    2015-11-08 11:37:26 -0800
committer Vijay Vasudevan <vrv@google.com>    2015-11-08 11:37:26 -0800
commit e28c1dbab3506d536ded7f1b1f0a527d4cad1b14 (patch)
tree   7d64cecfb4ce522f878b328b154158559b4bd9f7
parent ec490db88a1b624157f24a61dee0bd7d3c2630de (diff)
TensorFlow: Upstream latest changes to git.
Changes:
- Documentation changes: adding some examples for adding_an_op, fixes to some
  of the markdown, updates to docstrings, etc.
- Remove Dockerfile for now -- still undergoing changes.

Base CL: 107341050
-rw-r--r--  tensorflow/core/public/session.h | 6
-rw-r--r--  tensorflow/core/public/tensor.h | 10
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassEnv.md | 74
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md | 74
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassRandomAccessFile.md | 24
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassSession.md | 54
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassStatus.md | 62
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassTensor.md | 227
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassTensorBuffer.md | 26
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassTensorShape.md | 128
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassTensorShapeIter.md | 22
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassTensorShapeUtils.md | 42
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassThread.md | 10
-rw-r--r--  tensorflow/g3doc/api_docs/cc/ClassWritableFile.md | 26
-rw-r--r--  tensorflow/g3doc/api_docs/cc/StructSessionOptions.md | 22
-rw-r--r--  tensorflow/g3doc/api_docs/cc/StructState.md | 10
-rw-r--r--  tensorflow/g3doc/api_docs/cc/StructTensorShapeDim.md | 10
-rw-r--r--  tensorflow/g3doc/api_docs/cc/StructThreadOptions.md | 10
-rw-r--r--  tensorflow/g3doc/api_docs/cc/index.md | 8
-rw-r--r--  tensorflow/g3doc/api_docs/python/array_ops.md | 104
-rw-r--r--  tensorflow/g3doc/api_docs/python/client.md | 60
-rw-r--r--  tensorflow/g3doc/api_docs/python/constant_op.md | 52
-rw-r--r--  tensorflow/g3doc/api_docs/python/control_flow_ops.md | 100
-rw-r--r--  tensorflow/g3doc/api_docs/python/framework.md | 214
-rw-r--r--  tensorflow/g3doc/api_docs/python/image.md | 96
-rw-r--r--  tensorflow/g3doc/api_docs/python/io_ops.md | 397
-rw-r--r--  tensorflow/g3doc/api_docs/python/math_ops.md | 244
-rw-r--r--  tensorflow/g3doc/api_docs/python/nn.md | 120
-rw-r--r--  tensorflow/g3doc/api_docs/python/python_io.md | 10
-rw-r--r--  tensorflow/g3doc/api_docs/python/sparse_ops.md | 46
-rw-r--r--  tensorflow/g3doc/api_docs/python/state_ops.md | 142
-rw-r--r--  tensorflow/g3doc/api_docs/python/train.md | 136
-rw-r--r--  tensorflow/g3doc/get_started/basic_usage.md | 2
-rw-r--r--  tensorflow/g3doc/get_started/index.md | 8
-rw-r--r--  tensorflow/g3doc/get_started/os_setup.md | 126
-rw-r--r--  tensorflow/g3doc/how_tos/adding_an_op/index.md | 121
-rw-r--r--  tensorflow/g3doc/how_tos/adding_an_op/zero_out_2_test.py | 28
-rw-r--r--  tensorflow/g3doc/how_tos/adding_an_op/zero_out_3_test.py | 35
-rw-r--r--  tensorflow/g3doc/how_tos/adding_an_op/zero_out_grad_2.py | 25
-rw-r--r--  tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_kernel_1.cc | 1
-rw-r--r--  tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_kernel_2.cc (renamed from tensorflow/g3doc/how_tos/adding_an_op/register_kernels.cc) | 37
-rw-r--r--  tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_kernel_3.cc | 52
-rw-r--r--  tensorflow/g3doc/how_tos/graph_viz/index.md | 26
-rw-r--r--  tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md | 35
-rw-r--r--  tensorflow/g3doc/how_tos/variables/index.md | 2
-rw-r--r--  tensorflow/g3doc/resources/faq.md | 19
-rw-r--r--  tensorflow/g3doc/resources/glossary.md | 87
-rw-r--r--  tensorflow/g3doc/resources/uses.md | 1
-rw-r--r--  tensorflow/g3doc/tutorials/deep_cnn/index.md | 38
-rwxr-xr-x  tensorflow/g3doc/tutorials/mandelbrot/index.md | 58
-rwxr-xr-x  tensorflow/g3doc/tutorials/mandelbrot/output_8_0.jpe | bin 20185 -> 0 bytes
-rw-r--r--  tensorflow/g3doc/tutorials/mnist/beginners/index.md | 26
-rw-r--r--  tensorflow/g3doc/tutorials/mnist/download/index.md | 2
-rw-r--r--  tensorflow/g3doc/tutorials/mnist/pros/index.md | 7
-rwxr-xr-x  tensorflow/g3doc/tutorials/pdes/index.md | 45
-rwxr-xr-x  tensorflow/g3doc/tutorials/pdes/output_11_0.jpe | bin 15819 -> 0 bytes
-rwxr-xr-x  tensorflow/g3doc/tutorials/pdes/output_8_0.jpe | bin 3952 -> 0 bytes
-rw-r--r--  tensorflow/g3doc/tutorials/word2vec/index.md | 36
-rw-r--r--  tensorflow/python/framework/docs.py | 4
-rw-r--r--  tensorflow/python/framework/ops.py | 4
-rw-r--r--  tensorflow/python/training/input.py | 201
-rw-r--r--  tensorflow/tools/docker/Dockerfile | 100
-rw-r--r--  tensorflow/tools/docker/README.md | 63
63 files changed, 2003 insertions(+), 1752 deletions(-)
diff --git a/tensorflow/core/public/session.h b/tensorflow/core/public/session.h
index a33d5ee6ae..abce20e218 100644
--- a/tensorflow/core/public/session.h
+++ b/tensorflow/core/public/session.h
@@ -25,8 +25,10 @@ namespace tensorflow {
///
/// Example:
///
+/// ```c++
+///
/// tensorflow::GraphDef graph;
-/// // ... Create or load graph into 'graph'.
+/// // ... Create or load graph into "graph".
///
/// // This example uses the default options which connects
/// // to a local runtime.
@@ -54,6 +56,8 @@ namespace tensorflow {
/// // this session.
/// session->Close()
///
+/// ```
+///
/// A Session allows concurrent calls to Run(), though a Session must
/// be created / extended by a single thread.
///
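Assembled from the hunks above, the newly fenced Session example reads roughly as follows. This is a sketch, not part of the patch: the includes, the wrapping function, and the trailing semicolon on Close() are added here, and the error-handling bodies stay elided as in the original.

```c++
#include <memory>
#include <vector>

#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/tensor.h"

void RunGraphExample(const tensorflow::GraphDef& graph) {
  // Default options connect to a local runtime.
  tensorflow::SessionOptions options;
  std::unique_ptr<tensorflow::Session> session(tensorflow::NewSession(options));

  // Create the session with this graph.
  tensorflow::Status s = session->Create(graph);
  if (!s.ok()) { /* handle the error */ }

  // Fetch the first output of "output"; run "update_state" but return nothing for it.
  std::vector<tensorflow::Tensor> outputs;
  s = session->Run({}, {"output:0"}, {"update_state"}, &outputs);
  if (!s.ok()) { /* handle the error */ }

  // Map the output as a flattened float tensor and inspect it.
  auto output_tensor = outputs[0].flat<float>();
  if (output_tensor(0) > 0.5) { /* ... */ }

  // Close the session to release its resources.
  session->Close();
}
```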
diff --git a/tensorflow/core/public/tensor.h b/tensorflow/core/public/tensor.h
index 6c6ff0f58a..7ff97e8a73 100644
--- a/tensorflow/core/public/tensor.h
+++ b/tensorflow/core/public/tensor.h
@@ -132,6 +132,9 @@ class Tensor {
/// fails if either type or sizes mismatch.
///
/// Example:
+ ///
+ /// ```c++
+ ///
/// typedef float T;
/// Tensor my_mat(...built with Shape{rows: 3, cols: 5}...);
/// auto mat = my_mat.matrix<T>(); // 2D Eigen::Tensor, 3 x 5.
@@ -139,6 +142,8 @@ class Tensor {
/// auto vec = my_mat.vec<T>(); // CHECK fails as my_mat is 2D.
/// auto vec = my_mat.tensor<T, 3>(); // CHECK fails as my_mat is 2D.
/// auto mat = my_mat.matrix<int32>();// CHECK fails as type mismatch.
+ ///
+ /// ```
template <typename T>
typename TTypes<T>::Vec vec() {
return tensor<T, 1>();
@@ -162,6 +167,9 @@ class Tensor {
/// Eigen::Tensor with the same number of elements as the Tensor.
///
/// Example:
+ ///
+ /// ```c++
+ ///
/// typedef float T;
/// Tensor my_ten(...built with Shape{planes: 4, rows: 3, cols: 5}...);
/// // 1D Eigen::Tensor, size 60:
@@ -176,6 +184,8 @@ class Tensor {
/// auto weird = my_ten.shaped<T, 3>({6, 5, 2});
/// // CHECK fails, type mismatch:
/// auto bad = my_ten.flat<int32>();
+ ///
+ /// ```
template <typename T>
typename TTypes<T>::Flat flat() {
return shaped<T, 1>({NumElements()});
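The accessor examples being fenced above can be written as a compilable sketch like the one below; the DT_FLOAT/TensorShape constructor is the one listed in the Tensor member summary later in this change, and the wrapping function is added here for illustration.

```c++
#include "tensorflow/core/public/tensor.h"

void TensorViewsExample() {
  // 3 x 5 float matrix, as in the "Shape{rows: 3, cols: 5}" example above.
  tensorflow::Tensor my_mat(tensorflow::DT_FLOAT, tensorflow::TensorShape({3, 5}));

  auto mat = my_mat.matrix<float>();   // 2D Eigen::Tensor view, 3 x 5.
  mat(0, 0) = 1.0f;                    // Element access through the Eigen map.

  auto flat = my_mat.flat<float>();    // 1D view over all 15 elements.
  flat(14) = 2.0f;

  // my_mat.vec<float>() or my_mat.matrix<int32>() would CHECK-fail here,
  // because the tensor is 2-dimensional and holds floats.
}
```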
diff --git a/tensorflow/g3doc/api_docs/cc/ClassEnv.md b/tensorflow/g3doc/api_docs/cc/ClassEnv.md
index 039087e703..642b65392f 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassEnv.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassEnv.md
@@ -1,4 +1,4 @@
-#Class tensorflow::Env <a class="md-anchor" id="AUTOGENERATED-class-tensorflow--env"></a>
+# Class `tensorflow::Env` <a class="md-anchor" id="AUTOGENERATED-class--tensorflow--env-"></a>
An interface used by the tensorflow implementation to access operating system functionality like the filesystem etc.
@@ -8,52 +8,52 @@ All Env implementations are safe for concurrent access from multiple threads wit
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [tensorflow::Env::Env](#tensorflow_Env_Env)
-* [virtual tensorflow::Env::~Env](#virtual_tensorflow_Env_Env)
-* [virtual Status tensorflow::Env::NewRandomAccessFile](#virtual_Status_tensorflow_Env_NewRandomAccessFile)
+* [`tensorflow::Env::Env()`](#tensorflow_Env_Env)
+* [`virtual tensorflow::Env::~Env()`](#virtual_tensorflow_Env_Env)
+* [`virtual Status tensorflow::Env::NewRandomAccessFile(const string &fname, RandomAccessFile **result)=0`](#virtual_Status_tensorflow_Env_NewRandomAccessFile)
* Creates a brand new random access read-only file with the specified name.
-* [virtual Status tensorflow::Env::NewWritableFile](#virtual_Status_tensorflow_Env_NewWritableFile)
+* [`virtual Status tensorflow::Env::NewWritableFile(const string &fname, WritableFile **result)=0`](#virtual_Status_tensorflow_Env_NewWritableFile)
* Creates an object that writes to a new file with the specified name.
-* [virtual Status tensorflow::Env::NewAppendableFile](#virtual_Status_tensorflow_Env_NewAppendableFile)
+* [`virtual Status tensorflow::Env::NewAppendableFile(const string &fname, WritableFile **result)=0`](#virtual_Status_tensorflow_Env_NewAppendableFile)
* Creates an object that either appends to an existing file, or writes to a new file (if the file does not exist to begin with).
-* [virtual bool tensorflow::Env::FileExists](#virtual_bool_tensorflow_Env_FileExists)
+* [`virtual bool tensorflow::Env::FileExists(const string &fname)=0`](#virtual_bool_tensorflow_Env_FileExists)
* Returns true iff the named file exists.
-* [virtual Status tensorflow::Env::GetChildren](#virtual_Status_tensorflow_Env_GetChildren)
- * Stores in *result the names of the children of the specified directory. The names are relative to &quot;dir&quot;.
-* [virtual Status tensorflow::Env::DeleteFile](#virtual_Status_tensorflow_Env_DeleteFile)
+* [`virtual Status tensorflow::Env::GetChildren(const string &dir, std::vector< string > *result)=0`](#virtual_Status_tensorflow_Env_GetChildren)
+ * Stores in *result the names of the children of the specified directory. The names are relative to "dir".
+* [`virtual Status tensorflow::Env::DeleteFile(const string &fname)=0`](#virtual_Status_tensorflow_Env_DeleteFile)
* Deletes the named file.
-* [virtual Status tensorflow::Env::CreateDir](#virtual_Status_tensorflow_Env_CreateDir)
+* [`virtual Status tensorflow::Env::CreateDir(const string &dirname)=0`](#virtual_Status_tensorflow_Env_CreateDir)
* Creates the specified directory.
-* [virtual Status tensorflow::Env::DeleteDir](#virtual_Status_tensorflow_Env_DeleteDir)
+* [`virtual Status tensorflow::Env::DeleteDir(const string &dirname)=0`](#virtual_Status_tensorflow_Env_DeleteDir)
* Deletes the specified directory.
-* [virtual Status tensorflow::Env::GetFileSize](#virtual_Status_tensorflow_Env_GetFileSize)
+* [`virtual Status tensorflow::Env::GetFileSize(const string &fname, uint64 *file_size)=0`](#virtual_Status_tensorflow_Env_GetFileSize)
* Stores the size of fname in *file_size.
-* [virtual Status tensorflow::Env::RenameFile](#virtual_Status_tensorflow_Env_RenameFile)
+* [`virtual Status tensorflow::Env::RenameFile(const string &src, const string &target)=0`](#virtual_Status_tensorflow_Env_RenameFile)
* Renames file src to target. If target already exists, it will be replaced.
-* [virtual uint64 tensorflow::Env::NowMicros](#virtual_uint64_tensorflow_Env_NowMicros)
+* [`virtual uint64 tensorflow::Env::NowMicros()=0`](#virtual_uint64_tensorflow_Env_NowMicros)
* Returns the number of micro-seconds since some fixed point in time. Only useful for computing deltas of time.
-* [virtual void tensorflow::Env::SleepForMicroseconds](#virtual_void_tensorflow_Env_SleepForMicroseconds)
+* [`virtual void tensorflow::Env::SleepForMicroseconds(int micros)=0`](#virtual_void_tensorflow_Env_SleepForMicroseconds)
* Sleeps/delays the thread for the prescribed number of micro-seconds.
-* [virtual Thread* tensorflow::Env::StartThread](#virtual_Thread_tensorflow_Env_StartThread)
- * Returns a new thread that is running fn() and is identified (for debugging/performance-analysis) by &quot;name&quot;.
-* [static Env* tensorflow::Env::Default](#static_Env_tensorflow_Env_Default)
+* [`virtual Thread* tensorflow::Env::StartThread(const ThreadOptions &thread_options, const string &name, std::function< void()> fn) TF_MUST_USE_RESULT=0`](#virtual_Thread_tensorflow_Env_StartThread)
+ * Returns a new thread that is running fn() and is identified (for debugging/performance-analysis) by "name".
+* [`static Env* tensorflow::Env::Default()`](#static_Env_tensorflow_Env_Default)
* Returns a default environment suitable for the current operating system.
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### tensorflow::Env::Env() <a class="md-anchor" id="tensorflow_Env_Env"></a>
+#### `tensorflow::Env::Env()` <a class="md-anchor" id="tensorflow_Env_Env"></a>
-#### virtual tensorflow::Env::~Env() <a class="md-anchor" id="virtual_tensorflow_Env_Env"></a>
+#### `virtual tensorflow::Env::~Env()` <a class="md-anchor" id="virtual_tensorflow_Env_Env"></a>
-#### virtual Status tensorflow::Env::NewRandomAccessFile(const string &amp;fname, RandomAccessFile **result)=0 <a class="md-anchor" id="virtual_Status_tensorflow_Env_NewRandomAccessFile"></a>
+#### `virtual Status tensorflow::Env::NewRandomAccessFile(const string &fname, RandomAccessFile **result)=0` <a class="md-anchor" id="virtual_Status_tensorflow_Env_NewRandomAccessFile"></a>
Creates a brand new random access read-only file with the specified name.
@@ -61,7 +61,7 @@ On success, stores a pointer to the new file in *result and returns OK. On failu
The returned file may be concurrently accessed by multiple threads.
-#### virtual Status tensorflow::Env::NewWritableFile(const string &amp;fname, WritableFile **result)=0 <a class="md-anchor" id="virtual_Status_tensorflow_Env_NewWritableFile"></a>
+#### `virtual Status tensorflow::Env::NewWritableFile(const string &fname, WritableFile **result)=0` <a class="md-anchor" id="virtual_Status_tensorflow_Env_NewWritableFile"></a>
Creates an object that writes to a new file with the specified name.
@@ -69,7 +69,7 @@ Deletes any existing file with the same name and creates a new file. On success,
The returned file will only be accessed by one thread at a time.
-#### virtual Status tensorflow::Env::NewAppendableFile(const string &amp;fname, WritableFile **result)=0 <a class="md-anchor" id="virtual_Status_tensorflow_Env_NewAppendableFile"></a>
+#### `virtual Status tensorflow::Env::NewAppendableFile(const string &fname, WritableFile **result)=0` <a class="md-anchor" id="virtual_Status_tensorflow_Env_NewAppendableFile"></a>
Creates an object that either appends to an existing file, or writes to a new file (if the file does not exist to begin with).
@@ -77,67 +77,67 @@ On success, stores a pointer to the new file in *result and returns OK. On failu
The returned file will only be accessed by one thread at a time.
-#### virtual bool tensorflow::Env::FileExists(const string &amp;fname)=0 <a class="md-anchor" id="virtual_bool_tensorflow_Env_FileExists"></a>
+#### `virtual bool tensorflow::Env::FileExists(const string &fname)=0` <a class="md-anchor" id="virtual_bool_tensorflow_Env_FileExists"></a>
Returns true iff the named file exists.
-#### virtual Status tensorflow::Env::GetChildren(const string &amp;dir, std::vector&lt; string &gt; *result)=0 <a class="md-anchor" id="virtual_Status_tensorflow_Env_GetChildren"></a>
+#### `virtual Status tensorflow::Env::GetChildren(const string &dir, std::vector< string > *result)=0` <a class="md-anchor" id="virtual_Status_tensorflow_Env_GetChildren"></a>
-Stores in *result the names of the children of the specified directory. The names are relative to &quot;dir&quot;.
+Stores in *result the names of the children of the specified directory. The names are relative to "dir".
Original contents of *results are dropped.
-#### virtual Status tensorflow::Env::DeleteFile(const string &amp;fname)=0 <a class="md-anchor" id="virtual_Status_tensorflow_Env_DeleteFile"></a>
+#### `virtual Status tensorflow::Env::DeleteFile(const string &fname)=0` <a class="md-anchor" id="virtual_Status_tensorflow_Env_DeleteFile"></a>
Deletes the named file.
-#### virtual Status tensorflow::Env::CreateDir(const string &amp;dirname)=0 <a class="md-anchor" id="virtual_Status_tensorflow_Env_CreateDir"></a>
+#### `virtual Status tensorflow::Env::CreateDir(const string &dirname)=0` <a class="md-anchor" id="virtual_Status_tensorflow_Env_CreateDir"></a>
Creates the specified directory.
-#### virtual Status tensorflow::Env::DeleteDir(const string &amp;dirname)=0 <a class="md-anchor" id="virtual_Status_tensorflow_Env_DeleteDir"></a>
+#### `virtual Status tensorflow::Env::DeleteDir(const string &dirname)=0` <a class="md-anchor" id="virtual_Status_tensorflow_Env_DeleteDir"></a>
Deletes the specified directory.
-#### virtual Status tensorflow::Env::GetFileSize(const string &amp;fname, uint64 *file_size)=0 <a class="md-anchor" id="virtual_Status_tensorflow_Env_GetFileSize"></a>
+#### `virtual Status tensorflow::Env::GetFileSize(const string &fname, uint64 *file_size)=0` <a class="md-anchor" id="virtual_Status_tensorflow_Env_GetFileSize"></a>
Stores the size of fname in *file_size.
-#### virtual Status tensorflow::Env::RenameFile(const string &amp;src, const string &amp;target)=0 <a class="md-anchor" id="virtual_Status_tensorflow_Env_RenameFile"></a>
+#### `virtual Status tensorflow::Env::RenameFile(const string &src, const string &target)=0` <a class="md-anchor" id="virtual_Status_tensorflow_Env_RenameFile"></a>
Renames file src to target. If target already exists, it will be replaced.
-#### virtual uint64 tensorflow::Env::NowMicros()=0 <a class="md-anchor" id="virtual_uint64_tensorflow_Env_NowMicros"></a>
+#### `virtual uint64 tensorflow::Env::NowMicros()=0` <a class="md-anchor" id="virtual_uint64_tensorflow_Env_NowMicros"></a>
Returns the number of micro-seconds since some fixed point in time. Only useful for computing deltas of time.
-#### virtual void tensorflow::Env::SleepForMicroseconds(int micros)=0 <a class="md-anchor" id="virtual_void_tensorflow_Env_SleepForMicroseconds"></a>
+#### `virtual void tensorflow::Env::SleepForMicroseconds(int micros)=0` <a class="md-anchor" id="virtual_void_tensorflow_Env_SleepForMicroseconds"></a>
Sleeps/delays the thread for the prescribed number of micro-seconds.
-#### virtual Thread* tensorflow::Env::StartThread(const ThreadOptions &amp;thread_options, const string &amp;name, std::function&lt; void()&gt; fn) TF_MUST_USE_RESULT=0 <a class="md-anchor" id="virtual_Thread_tensorflow_Env_StartThread"></a>
+#### `virtual Thread* tensorflow::Env::StartThread(const ThreadOptions &thread_options, const string &name, std::function< void()> fn) TF_MUST_USE_RESULT=0` <a class="md-anchor" id="virtual_Thread_tensorflow_Env_StartThread"></a>
-Returns a new thread that is running fn() and is identified (for debugging/performance-analysis) by &quot;name&quot;.
+Returns a new thread that is running fn() and is identified (for debugging/performance-analysis) by "name".
Caller takes ownership of the result and must delete it eventually (the deletion will block until fn() stops running).
-#### static Env* tensorflow::Env::Default() <a class="md-anchor" id="static_Env_tensorflow_Env_Default"></a>
+#### `static Env* tensorflow::Env::Default()` <a class="md-anchor" id="static_Env_tensorflow_Env_Default"></a>
Returns a default environment suitable for the current operating system.
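A minimal sketch of driving the Env interface documented above. It assumes the tensorflow/core/public/env.h header path at this revision, and WritableFile::Close() comes from ClassWritableFile.md, which is not shown in this excerpt.

```c++
#include "tensorflow/core/public/env.h"

void EnvExample() {
  tensorflow::Env* env = tensorflow::Env::Default();

  tensorflow::WritableFile* file = nullptr;
  tensorflow::Status s = env->NewWritableFile("/tmp/example.txt", &file);
  if (s.ok()) {
    file->Close();  // Close() is declared on WritableFile (see ClassWritableFile.md).
    delete file;    // The caller owns the returned file object.
  }

  if (env->FileExists("/tmp/example.txt")) {
    tensorflow::uint64 size = 0;
    s = env->GetFileSize("/tmp/example.txt", &size);  // Stores the size in *file_size.
    s = env->DeleteFile("/tmp/example.txt");
  }
}
```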
diff --git a/tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md b/tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md
index 58e1059886..cd18bb24bf 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassEnvWrapper.md
@@ -1,4 +1,4 @@
-#Class tensorflow::EnvWrapper <a class="md-anchor" id="AUTOGENERATED-class-tensorflow--envwrapper"></a>
+# Class `tensorflow::EnvWrapper` <a class="md-anchor" id="AUTOGENERATED-class--tensorflow--envwrapper-"></a>
An implementation of Env that forwards all calls to another Env .
@@ -6,59 +6,59 @@ May be useful to clients who wish to override just part of the functionality of
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [tensorflow::EnvWrapper::EnvWrapper](#tensorflow_EnvWrapper_EnvWrapper)
+* [`tensorflow::EnvWrapper::EnvWrapper(Env *t)`](#tensorflow_EnvWrapper_EnvWrapper)
* Initializes an EnvWrapper that delegates all calls to *t.
-* [virtual tensorflow::EnvWrapper::~EnvWrapper](#virtual_tensorflow_EnvWrapper_EnvWrapper)
-* [Env* tensorflow::EnvWrapper::target](#Env_tensorflow_EnvWrapper_target)
+* [`virtual tensorflow::EnvWrapper::~EnvWrapper()`](#virtual_tensorflow_EnvWrapper_EnvWrapper)
+* [`Env* tensorflow::EnvWrapper::target() const`](#Env_tensorflow_EnvWrapper_target)
* Returns the target to which this Env forwards all calls.
-* [Status tensorflow::EnvWrapper::NewRandomAccessFile](#Status_tensorflow_EnvWrapper_NewRandomAccessFile)
+* [`Status tensorflow::EnvWrapper::NewRandomAccessFile(const string &f, RandomAccessFile **r) override`](#Status_tensorflow_EnvWrapper_NewRandomAccessFile)
* Creates a brand new random access read-only file with the specified name.
-* [Status tensorflow::EnvWrapper::NewWritableFile](#Status_tensorflow_EnvWrapper_NewWritableFile)
+* [`Status tensorflow::EnvWrapper::NewWritableFile(const string &f, WritableFile **r) override`](#Status_tensorflow_EnvWrapper_NewWritableFile)
* Creates an object that writes to a new file with the specified name.
-* [Status tensorflow::EnvWrapper::NewAppendableFile](#Status_tensorflow_EnvWrapper_NewAppendableFile)
+* [`Status tensorflow::EnvWrapper::NewAppendableFile(const string &f, WritableFile **r) override`](#Status_tensorflow_EnvWrapper_NewAppendableFile)
* Creates an object that either appends to an existing file, or writes to a new file (if the file does not exist to begin with).
-* [bool tensorflow::EnvWrapper::FileExists](#bool_tensorflow_EnvWrapper_FileExists)
+* [`bool tensorflow::EnvWrapper::FileExists(const string &f) override`](#bool_tensorflow_EnvWrapper_FileExists)
* Returns true iff the named file exists.
-* [Status tensorflow::EnvWrapper::GetChildren](#Status_tensorflow_EnvWrapper_GetChildren)
- * Stores in *result the names of the children of the specified directory. The names are relative to &quot;dir&quot;.
-* [Status tensorflow::EnvWrapper::DeleteFile](#Status_tensorflow_EnvWrapper_DeleteFile)
+* [`Status tensorflow::EnvWrapper::GetChildren(const string &dir, std::vector< string > *r) override`](#Status_tensorflow_EnvWrapper_GetChildren)
+ * Stores in *result the names of the children of the specified directory. The names are relative to "dir".
+* [`Status tensorflow::EnvWrapper::DeleteFile(const string &f) override`](#Status_tensorflow_EnvWrapper_DeleteFile)
* Deletes the named file.
-* [Status tensorflow::EnvWrapper::CreateDir](#Status_tensorflow_EnvWrapper_CreateDir)
+* [`Status tensorflow::EnvWrapper::CreateDir(const string &d) override`](#Status_tensorflow_EnvWrapper_CreateDir)
* Creates the specified directory.
-* [Status tensorflow::EnvWrapper::DeleteDir](#Status_tensorflow_EnvWrapper_DeleteDir)
+* [`Status tensorflow::EnvWrapper::DeleteDir(const string &d) override`](#Status_tensorflow_EnvWrapper_DeleteDir)
* Deletes the specified directory.
-* [Status tensorflow::EnvWrapper::GetFileSize](#Status_tensorflow_EnvWrapper_GetFileSize)
+* [`Status tensorflow::EnvWrapper::GetFileSize(const string &f, uint64 *s) override`](#Status_tensorflow_EnvWrapper_GetFileSize)
* Stores the size of fname in *file_size.
-* [Status tensorflow::EnvWrapper::RenameFile](#Status_tensorflow_EnvWrapper_RenameFile)
+* [`Status tensorflow::EnvWrapper::RenameFile(const string &s, const string &t) override`](#Status_tensorflow_EnvWrapper_RenameFile)
* Renames file src to target. If target already exists, it will be replaced.
-* [uint64 tensorflow::EnvWrapper::NowMicros](#uint64_tensorflow_EnvWrapper_NowMicros)
+* [`uint64 tensorflow::EnvWrapper::NowMicros() override`](#uint64_tensorflow_EnvWrapper_NowMicros)
* Returns the number of micro-seconds since some fixed point in time. Only useful for computing deltas of time.
-* [void tensorflow::EnvWrapper::SleepForMicroseconds](#void_tensorflow_EnvWrapper_SleepForMicroseconds)
+* [`void tensorflow::EnvWrapper::SleepForMicroseconds(int micros) override`](#void_tensorflow_EnvWrapper_SleepForMicroseconds)
* Sleeps/delays the thread for the prescribed number of micro-seconds.
-* [Thread* tensorflow::EnvWrapper::StartThread](#Thread_tensorflow_EnvWrapper_StartThread)
- * Returns a new thread that is running fn() and is identified (for debugging/performance-analysis) by &quot;name&quot;.
+* [`Thread* tensorflow::EnvWrapper::StartThread(const ThreadOptions &thread_options, const string &name, std::function< void()> fn) override`](#Thread_tensorflow_EnvWrapper_StartThread)
+ * Returns a new thread that is running fn() and is identified (for debugging/performance-analysis) by "name".
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### tensorflow::EnvWrapper::EnvWrapper(Env *t) <a class="md-anchor" id="tensorflow_EnvWrapper_EnvWrapper"></a>
+#### `tensorflow::EnvWrapper::EnvWrapper(Env *t)` <a class="md-anchor" id="tensorflow_EnvWrapper_EnvWrapper"></a>
Initializes an EnvWrapper that delegates all calls to *t.
-#### virtual tensorflow::EnvWrapper::~EnvWrapper() <a class="md-anchor" id="virtual_tensorflow_EnvWrapper_EnvWrapper"></a>
+#### `virtual tensorflow::EnvWrapper::~EnvWrapper()` <a class="md-anchor" id="virtual_tensorflow_EnvWrapper_EnvWrapper"></a>
-#### Env* tensorflow::EnvWrapper::target() const <a class="md-anchor" id="Env_tensorflow_EnvWrapper_target"></a>
+#### `Env* tensorflow::EnvWrapper::target() const` <a class="md-anchor" id="Env_tensorflow_EnvWrapper_target"></a>
Returns the target to which this Env forwards all calls.
-#### Status tensorflow::EnvWrapper::NewRandomAccessFile(const string &amp;f, RandomAccessFile **r) override <a class="md-anchor" id="Status_tensorflow_EnvWrapper_NewRandomAccessFile"></a>
+#### `Status tensorflow::EnvWrapper::NewRandomAccessFile(const string &f, RandomAccessFile **r) override` <a class="md-anchor" id="Status_tensorflow_EnvWrapper_NewRandomAccessFile"></a>
Creates a brand new random access read-only file with the specified name.
@@ -66,7 +66,7 @@ On success, stores a pointer to the new file in *result and returns OK. On failu
The returned file may be concurrently accessed by multiple threads.
-#### Status tensorflow::EnvWrapper::NewWritableFile(const string &amp;f, WritableFile **r) override <a class="md-anchor" id="Status_tensorflow_EnvWrapper_NewWritableFile"></a>
+#### `Status tensorflow::EnvWrapper::NewWritableFile(const string &f, WritableFile **r) override` <a class="md-anchor" id="Status_tensorflow_EnvWrapper_NewWritableFile"></a>
Creates an object that writes to a new file with the specified name.
@@ -74,7 +74,7 @@ Deletes any existing file with the same name and creates a new file. On success,
The returned file will only be accessed by one thread at a time.
-#### Status tensorflow::EnvWrapper::NewAppendableFile(const string &amp;f, WritableFile **r) override <a class="md-anchor" id="Status_tensorflow_EnvWrapper_NewAppendableFile"></a>
+#### `Status tensorflow::EnvWrapper::NewAppendableFile(const string &f, WritableFile **r) override` <a class="md-anchor" id="Status_tensorflow_EnvWrapper_NewAppendableFile"></a>
Creates an object that either appends to an existing file, or writes to a new file (if the file does not exist to begin with).
@@ -82,62 +82,62 @@ On success, stores a pointer to the new file in *result and returns OK. On failu
The returned file will only be accessed by one thread at a time.
-#### bool tensorflow::EnvWrapper::FileExists(const string &amp;f) override <a class="md-anchor" id="bool_tensorflow_EnvWrapper_FileExists"></a>
+#### `bool tensorflow::EnvWrapper::FileExists(const string &f) override` <a class="md-anchor" id="bool_tensorflow_EnvWrapper_FileExists"></a>
Returns true iff the named file exists.
-#### Status tensorflow::EnvWrapper::GetChildren(const string &amp;dir, std::vector&lt; string &gt; *r) override <a class="md-anchor" id="Status_tensorflow_EnvWrapper_GetChildren"></a>
+#### `Status tensorflow::EnvWrapper::GetChildren(const string &dir, std::vector< string > *r) override` <a class="md-anchor" id="Status_tensorflow_EnvWrapper_GetChildren"></a>
-Stores in *result the names of the children of the specified directory. The names are relative to &quot;dir&quot;.
+Stores in *result the names of the children of the specified directory. The names are relative to "dir".
Original contents of *results are dropped.
-#### Status tensorflow::EnvWrapper::DeleteFile(const string &amp;f) override <a class="md-anchor" id="Status_tensorflow_EnvWrapper_DeleteFile"></a>
+#### `Status tensorflow::EnvWrapper::DeleteFile(const string &f) override` <a class="md-anchor" id="Status_tensorflow_EnvWrapper_DeleteFile"></a>
Deletes the named file.
-#### Status tensorflow::EnvWrapper::CreateDir(const string &amp;d) override <a class="md-anchor" id="Status_tensorflow_EnvWrapper_CreateDir"></a>
+#### `Status tensorflow::EnvWrapper::CreateDir(const string &d) override` <a class="md-anchor" id="Status_tensorflow_EnvWrapper_CreateDir"></a>
Creates the specified directory.
-#### Status tensorflow::EnvWrapper::DeleteDir(const string &amp;d) override <a class="md-anchor" id="Status_tensorflow_EnvWrapper_DeleteDir"></a>
+#### `Status tensorflow::EnvWrapper::DeleteDir(const string &d) override` <a class="md-anchor" id="Status_tensorflow_EnvWrapper_DeleteDir"></a>
Deletes the specified directory.
-#### Status tensorflow::EnvWrapper::GetFileSize(const string &amp;f, uint64 *s) override <a class="md-anchor" id="Status_tensorflow_EnvWrapper_GetFileSize"></a>
+#### `Status tensorflow::EnvWrapper::GetFileSize(const string &f, uint64 *s) override` <a class="md-anchor" id="Status_tensorflow_EnvWrapper_GetFileSize"></a>
Stores the size of fname in *file_size.
-#### Status tensorflow::EnvWrapper::RenameFile(const string &amp;s, const string &amp;t) override <a class="md-anchor" id="Status_tensorflow_EnvWrapper_RenameFile"></a>
+#### `Status tensorflow::EnvWrapper::RenameFile(const string &s, const string &t) override` <a class="md-anchor" id="Status_tensorflow_EnvWrapper_RenameFile"></a>
Renames file src to target. If target already exists, it will be replaced.
-#### uint64 tensorflow::EnvWrapper::NowMicros() override <a class="md-anchor" id="uint64_tensorflow_EnvWrapper_NowMicros"></a>
+#### `uint64 tensorflow::EnvWrapper::NowMicros() override` <a class="md-anchor" id="uint64_tensorflow_EnvWrapper_NowMicros"></a>
Returns the number of micro-seconds since some fixed point in time. Only useful for computing deltas of time.
-#### void tensorflow::EnvWrapper::SleepForMicroseconds(int micros) override <a class="md-anchor" id="void_tensorflow_EnvWrapper_SleepForMicroseconds"></a>
+#### `void tensorflow::EnvWrapper::SleepForMicroseconds(int micros) override` <a class="md-anchor" id="void_tensorflow_EnvWrapper_SleepForMicroseconds"></a>
Sleeps/delays the thread for the prescribed number of micro-seconds.
-#### Thread* tensorflow::EnvWrapper::StartThread(const ThreadOptions &amp;thread_options, const string &amp;name, std::function&lt; void()&gt; fn) override <a class="md-anchor" id="Thread_tensorflow_EnvWrapper_StartThread"></a>
+#### `Thread* tensorflow::EnvWrapper::StartThread(const ThreadOptions &thread_options, const string &name, std::function< void()> fn) override` <a class="md-anchor" id="Thread_tensorflow_EnvWrapper_StartThread"></a>
-Returns a new thread that is running fn() and is identified (for debugging/performance-analysis) by &quot;name&quot;.
+Returns a new thread that is running fn() and is identified (for debugging/performance-analysis) by "name".
Caller takes ownership of the result and must delete it eventually (the deletion will block until fn() stops running).
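Since EnvWrapper exists so clients can override just part of Env, a hypothetical wrapper that intercepts only FileExists() and forwards everything else to the target Env might look like the sketch below (the CountingEnv name and the counter are illustrative, not from the patch).

```c++
#include <string>

#include "tensorflow/core/public/env.h"

// Hypothetical example: delegate everything to the wrapped Env except FileExists().
class CountingEnv : public tensorflow::EnvWrapper {
 public:
  explicit CountingEnv(tensorflow::Env* base) : tensorflow::EnvWrapper(base) {}

  bool FileExists(const std::string& fname) override {
    ++lookups_;                                         // extra bookkeeping
    return tensorflow::EnvWrapper::FileExists(fname);   // forward to the target Env
  }

  int lookups() const { return lookups_; }

 private:
  int lookups_ = 0;
};
```

An instance such as `CountingEnv wrapper(tensorflow::Env::Default());` can then be passed anywhere an `Env*` is expected.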
diff --git a/tensorflow/g3doc/api_docs/cc/ClassRandomAccessFile.md b/tensorflow/g3doc/api_docs/cc/ClassRandomAccessFile.md
index b3647db7c7..d9ffdc05f4 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassRandomAccessFile.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassRandomAccessFile.md
@@ -1,4 +1,4 @@
-#Class tensorflow::RandomAccessFile <a class="md-anchor" id="AUTOGENERATED-class-tensorflow--randomaccessfile"></a>
+# Class `tensorflow::RandomAccessFile` <a class="md-anchor" id="AUTOGENERATED-class--tensorflow--randomaccessfile-"></a>
A file abstraction for randomly reading the contents of a file.
@@ -6,33 +6,33 @@ A file abstraction for randomly reading the contents of a file.
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [tensorflow::RandomAccessFile::RandomAccessFile](#tensorflow_RandomAccessFile_RandomAccessFile)
-* [virtual tensorflow::RandomAccessFile::~RandomAccessFile](#virtual_tensorflow_RandomAccessFile_RandomAccessFile)
-* [virtual Status tensorflow::RandomAccessFile::Read](#virtual_Status_tensorflow_RandomAccessFile_Read)
- * Reads up to &quot;n&quot; bytes from the file starting at &quot;offset&quot;.
+* [`tensorflow::RandomAccessFile::RandomAccessFile()`](#tensorflow_RandomAccessFile_RandomAccessFile)
+* [`virtual tensorflow::RandomAccessFile::~RandomAccessFile()`](#virtual_tensorflow_RandomAccessFile_RandomAccessFile)
+* [`virtual Status tensorflow::RandomAccessFile::Read(uint64 offset, size_t n, StringPiece *result, char *scratch) const =0`](#virtual_Status_tensorflow_RandomAccessFile_Read)
+ * Reads up to "n" bytes from the file starting at "offset".
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### tensorflow::RandomAccessFile::RandomAccessFile() <a class="md-anchor" id="tensorflow_RandomAccessFile_RandomAccessFile"></a>
+#### `tensorflow::RandomAccessFile::RandomAccessFile()` <a class="md-anchor" id="tensorflow_RandomAccessFile_RandomAccessFile"></a>
-#### virtual tensorflow::RandomAccessFile::~RandomAccessFile() <a class="md-anchor" id="virtual_tensorflow_RandomAccessFile_RandomAccessFile"></a>
+#### `virtual tensorflow::RandomAccessFile::~RandomAccessFile()` <a class="md-anchor" id="virtual_tensorflow_RandomAccessFile_RandomAccessFile"></a>
-#### virtual Status tensorflow::RandomAccessFile::Read(uint64 offset, size_t n, StringPiece *result, char *scratch) const =0 <a class="md-anchor" id="virtual_Status_tensorflow_RandomAccessFile_Read"></a>
+#### `virtual Status tensorflow::RandomAccessFile::Read(uint64 offset, size_t n, StringPiece *result, char *scratch) const =0` <a class="md-anchor" id="virtual_Status_tensorflow_RandomAccessFile_Read"></a>
-Reads up to &quot;n&quot; bytes from the file starting at &quot;offset&quot;.
+Reads up to "n" bytes from the file starting at "offset".
-&quot;scratch[0..n-1]&quot; may be written by this routine. Sets &quot;*result&quot; to the data that was read (including if fewer than &quot;n&quot; bytes were successfully read). May set &quot;*result&quot; to point at data in &quot;scratch[0..n-1]&quot;, so &quot;scratch[0..n-1]&quot; must be live when &quot;*result&quot; is used.
+"scratch[0..n-1]" may be written by this routine. Sets "*result" to the data that was read (including if fewer than "n" bytes were successfully read). May set "*result" to point at data in "scratch[0..n-1]", so "scratch[0..n-1]" must be live when "*result" is used.
-On OK returned status: &quot;n&quot; bytes have been stored in &quot;*result&quot;. On non-OK returned status: [0..n] bytes have been stored in &quot;*result&quot;.
+On OK returned status: "n" bytes have been stored in "*result". On non-OK returned status: [0..n] bytes have been stored in "*result".
-Returns OUT_OF_RANGE if fewer than n bytes were stored in &quot;*result&quot; because of EOF.
+Returns OUT_OF_RANGE if fewer than n bytes were stored in "*result" because of EOF.
Safe for concurrent use by multiple threads.
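A sketch of the Read() contract described above, using Env::NewRandomAccessFile from ClassEnv.md; the path, buffer size, and the assumption that RandomAccessFile and StringPiece are visible via env.h are illustrative.

```c++
#include "tensorflow/core/public/env.h"

void ReadExample(tensorflow::Env* env) {
  tensorflow::RandomAccessFile* file = nullptr;
  tensorflow::Status s = env->NewRandomAccessFile("/tmp/example.txt", &file);
  if (!s.ok()) return;

  char scratch[64];
  tensorflow::StringPiece result;
  // On OK, exactly n bytes are in result; OUT_OF_RANGE means EOF was hit first
  // and result holds whatever was actually read.
  s = file->Read(/*offset=*/0, /*n=*/sizeof(scratch), &result, scratch);

  // result may point into scratch, so scratch must stay alive while result is used.
  delete file;
}
```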
diff --git a/tensorflow/g3doc/api_docs/cc/ClassSession.md b/tensorflow/g3doc/api_docs/cc/ClassSession.md
index 21e99a8332..19c1a489fc 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassSession.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassSession.md
@@ -1,37 +1,41 @@
-#Class tensorflow::Session <a class="md-anchor" id="AUTOGENERATED-class-tensorflow--session"></a>
+# Class `tensorflow::Session` <a class="md-anchor" id="AUTOGENERATED-class--tensorflow--session-"></a>
A Session instance lets a caller drive a TensorFlow graph computation.
When a Session is created with a given target, a new Session object is bound to the universe of resources specified by that target. Those resources are available to this session to perform computation described in the GraphDef. After extending the session with a graph, the caller uses the Run() API to perform the computation and potentially fetch outputs as Tensors.
-Example: tensorflow::GraphDef graph;
-// ... Create or load graph into &apos;graph&apos;.
+Example:
+
+```c++ tensorflow::GraphDef graph;
+// ... Create or load graph into "graph".
// This example uses the default options which connects
// to a local runtime.
tensorflow::SessionOptions options;
-std::unique_ptr&lt;tensorflow::Session&gt;
+std::unique_ptr<tensorflow::Session>
session(tensorflow::NewSession(options));
// Create the session with this graph.
-tensorflow::Status s = session-&gt;Create(graph);
+tensorflow::Status s = session->Create(graph);
if (!s.ok()) { ... }
-// Run the graph and fetch the first output of the &quot;output&quot;
+// Run the graph and fetch the first output of the "output"
// operation, and also run to but do not return anything
-// for the &quot;update_state&quot; operation.
-std::vector&lt;tensorflow::Tensor&gt; outputs;
-s = session-&gt;Run({}, {&quot;output:0&quot;}, {&quot;update_state&quot;}, &amp;outputs);
+// for the "update_state" operation.
+std::vector<tensorflow::Tensor> outputs;
+s = session->Run({}, {"output:0"}, {"update_state"}, &outputs);
if (!s.ok()) { ... }
// Map the output as a flattened float tensor, and do something
// with it.
-auto output_tensor = outputs[0].flat&lt;float&gt;();
-if (output_tensor(0) &gt; 0.5) { ... }
+auto output_tensor = outputs[0].flat<float>();
+if (output_tensor(0) > 0.5) { ... }
// Close the session to release the resources associated with
// this session.
-session-&gt;Close()
+session->Close()
+
+```
A Session allows concurrent calls to Run() , though a Session must be created / extended by a single thread.
@@ -39,49 +43,49 @@ Only one thread must call Close() , and Close() must only be called after all ot
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [virtual Status tensorflow::Session::Create](#virtual_Status_tensorflow_Session_Create)
+* [`virtual Status tensorflow::Session::Create(const GraphDef &graph)=0`](#virtual_Status_tensorflow_Session_Create)
* Create the graph to be used for the session.
-* [virtual Status tensorflow::Session::Extend](#virtual_Status_tensorflow_Session_Extend)
+* [`virtual Status tensorflow::Session::Extend(const GraphDef &graph)=0`](#virtual_Status_tensorflow_Session_Extend)
* Adds operations to the graph that is already registered with the Session .
-* [virtual Status tensorflow::Session::Run](#virtual_Status_tensorflow_Session_Run)
+* [`virtual Status tensorflow::Session::Run(const std::vector< std::pair< string, Tensor > > &inputs, const std::vector< string > &output_tensor_names, const std::vector< string > &target_node_names, std::vector< Tensor > *outputs)=0`](#virtual_Status_tensorflow_Session_Run)
* Runs the graph with the provided input tensors and fills &apos;outputs&apos; for the endpoints specified in &apos;output_tensor_names&apos;. Runs to but does not return Tensors for the nodes in &apos;target_node_names&apos;.
-* [virtual Status tensorflow::Session::Close](#virtual_Status_tensorflow_Session_Close)
+* [`virtual Status tensorflow::Session::Close()=0`](#virtual_Status_tensorflow_Session_Close)
* Closes this session.
-* [virtual tensorflow::Session::~Session](#virtual_tensorflow_Session_Session)
+* [`virtual tensorflow::Session::~Session()`](#virtual_tensorflow_Session_Session)
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### virtual Status tensorflow::Session::Create(const GraphDef &amp;graph)=0 <a class="md-anchor" id="virtual_Status_tensorflow_Session_Create"></a>
+#### `virtual Status tensorflow::Session::Create(const GraphDef &graph)=0` <a class="md-anchor" id="virtual_Status_tensorflow_Session_Create"></a>
Create the graph to be used for the session.
Returns an error if this session has already been created with a graph. To re-use the session with a different graph, the caller must Close() the session first.
-#### virtual Status tensorflow::Session::Extend(const GraphDef &amp;graph)=0 <a class="md-anchor" id="virtual_Status_tensorflow_Session_Extend"></a>
+#### `virtual Status tensorflow::Session::Extend(const GraphDef &graph)=0` <a class="md-anchor" id="virtual_Status_tensorflow_Session_Extend"></a>
Adds operations to the graph that is already registered with the Session .
-The names of new operations in &quot;graph&quot; must not exist in the graph that is already registered.
+The names of new operations in "graph" must not exist in the graph that is already registered.
-#### virtual Status tensorflow::Session::Run(const std::vector&lt; std::pair&lt; string, Tensor &gt; &gt; &amp;inputs, const std::vector&lt; string &gt; &amp;output_tensor_names, const std::vector&lt; string &gt; &amp;target_node_names, std::vector&lt; Tensor &gt; *outputs)=0 <a class="md-anchor" id="virtual_Status_tensorflow_Session_Run"></a>
+#### `virtual Status tensorflow::Session::Run(const std::vector< std::pair< string, Tensor > > &inputs, const std::vector< string > &output_tensor_names, const std::vector< string > &target_node_names, std::vector< Tensor > *outputs)=0` <a class="md-anchor" id="virtual_Status_tensorflow_Session_Run"></a>
Runs the graph with the provided input tensors and fills &apos;outputs&apos; for the endpoints specified in &apos;output_tensor_names&apos;. Runs to but does not return Tensors for the nodes in &apos;target_node_names&apos;.
The order of tensors in &apos;outputs&apos; will match the order provided by &apos;output_tensor_names&apos;.
-If Run returns OK(), then outputs-&gt;size() will be equal to output_tensor_names.size(). If Run does not return OK(), the state of outputs is undefined.
+If Run returns OK(), then outputs->size() will be equal to output_tensor_names.size(). If Run does not return OK(), the state of outputs is undefined.
-REQUIRES: The name of each Tensor of the input or output must match a &quot;Tensor endpoint&quot; in the GraphDef passed to Create() .
+REQUIRES: The name of each Tensor of the input or output must match a "Tensor endpoint" in the GraphDef passed to Create() .
REQUIRES: outputs is not nullptr if output_tensor_names is non-empty.
-#### virtual Status tensorflow::Session::Close()=0 <a class="md-anchor" id="virtual_Status_tensorflow_Session_Close"></a>
+#### `virtual Status tensorflow::Session::Close()=0` <a class="md-anchor" id="virtual_Status_tensorflow_Session_Close"></a>
Closes this session.
Closing a session releases the resources used by this session on the TensorFlow runtime (specified during session creation by the &apos; SessionOptions::target &apos; field).
-#### virtual tensorflow::Session::~Session() <a class="md-anchor" id="virtual_tensorflow_Session_Session"></a>
+#### `virtual tensorflow::Session::~Session()` <a class="md-anchor" id="virtual_tensorflow_Session_Session"></a>
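Run() also accepts input tensors, as its signature above shows. A sketch of feeding one tensor and fetching one output follows; the endpoint names "x:0" and "y:0" are hypothetical and must match Tensor endpoints in the GraphDef passed to Create().

```c++
#include <string>
#include <utility>
#include <vector>

#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/tensor.h"

void FeedAndFetch(tensorflow::Session* session) {
  // A length-2 float vector to feed.
  tensorflow::Tensor x(tensorflow::DT_FLOAT, tensorflow::TensorShape({2}));
  x.vec<float>()(0) = 1.0f;
  x.vec<float>()(1) = 2.0f;

  // Feed "x:0", fetch "y:0", run no extra target nodes.
  std::vector<std::pair<std::string, tensorflow::Tensor>> inputs = {{"x:0", x}};
  std::vector<tensorflow::Tensor> outputs;
  tensorflow::Status s = session->Run(inputs, {"y:0"}, {}, &outputs);

  // On OK, outputs.size() == output_tensor_names.size() == 1.
  if (s.ok()) {
    float y0 = outputs[0].flat<float>()(0);
    (void)y0;
  }
}
```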
diff --git a/tensorflow/g3doc/api_docs/cc/ClassStatus.md b/tensorflow/g3doc/api_docs/cc/ClassStatus.md
index 2082930df0..ac7490f8fe 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassStatus.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassStatus.md
@@ -1,4 +1,4 @@
-#Class tensorflow::Status <a class="md-anchor" id="AUTOGENERATED-class-tensorflow--status"></a>
+# Class `tensorflow::Status` <a class="md-anchor" id="AUTOGENERATED-class--tensorflow--status-"></a>
@@ -6,101 +6,101 @@
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [tensorflow::Status::Status](#tensorflow_Status_Status)
+* [`tensorflow::Status::Status()`](#tensorflow_Status_Status)
* Create a success status.
-* [tensorflow::Status::~Status](#tensorflow_Status_Status)
-* [tensorflow::Status::Status](#tensorflow_Status_Status)
+* [`tensorflow::Status::~Status()`](#tensorflow_Status_Status)
+* [`tensorflow::Status::Status(tensorflow::error::Code code, tensorflow::StringPiece msg)`](#tensorflow_Status_Status)
* Create a status with the specified error code and msg as a human-readable string containing more detailed information.
-* [tensorflow::Status::Status](#tensorflow_Status_Status)
+* [`tensorflow::Status::Status(const Status &s)`](#tensorflow_Status_Status)
* Copy the specified status.
-* [void tensorflow::Status::operator=](#void_tensorflow_Status_operator_)
-* [bool tensorflow::Status::ok](#bool_tensorflow_Status_ok)
+* [`void tensorflow::Status::operator=(const Status &s)`](#void_tensorflow_Status_operator_)
+* [`bool tensorflow::Status::ok() const`](#bool_tensorflow_Status_ok)
* Returns true iff the status indicates success.
-* [tensorflow::error::Code tensorflow::Status::code](#tensorflow_error_Code_tensorflow_Status_code)
-* [const string&amp; tensorflow::Status::error_message](#const_string_amp_tensorflow_Status_error_message)
-* [bool tensorflow::Status::operator==](#bool_tensorflow_Status_operator_)
-* [bool tensorflow::Status::operator!=](#bool_tensorflow_Status_operator_)
-* [void tensorflow::Status::Update](#void_tensorflow_Status_Update)
- * If &quot;ok()&quot;, stores &quot;new_status&quot; into *this. If &quot;!ok()&quot;, preserves the current status, but may augment with additional information about &quot;new_status&quot;.
-* [string tensorflow::Status::ToString](#string_tensorflow_Status_ToString)
- * Return a string representation of this status suitable for printing. Returns the string &quot;OK&quot; for success.
-* [static Status tensorflow::Status::OK](#static_Status_tensorflow_Status_OK)
+* [`tensorflow::error::Code tensorflow::Status::code() const`](#tensorflow_error_Code_tensorflow_Status_code)
+* [`const string& tensorflow::Status::error_message() const`](#const_string_tensorflow_Status_error_message)
+* [`bool tensorflow::Status::operator==(const Status &x) const`](#bool_tensorflow_Status_operator_)
+* [`bool tensorflow::Status::operator!=(const Status &x) const`](#bool_tensorflow_Status_operator_)
+* [`void tensorflow::Status::Update(const Status &new_status)`](#void_tensorflow_Status_Update)
+ * If "ok()", stores "new_status" into *this. If "!ok()", preserves the current status, but may augment with additional information about "new_status".
+* [`string tensorflow::Status::ToString() const`](#string_tensorflow_Status_ToString)
+ * Return a string representation of this status suitable for printing. Returns the string "OK" for success.
+* [`static Status tensorflow::Status::OK()`](#static_Status_tensorflow_Status_OK)
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### tensorflow::Status::Status() <a class="md-anchor" id="tensorflow_Status_Status"></a>
+#### `tensorflow::Status::Status()` <a class="md-anchor" id="tensorflow_Status_Status"></a>
Create a success status.
-#### tensorflow::Status::~Status() <a class="md-anchor" id="tensorflow_Status_Status"></a>
+#### `tensorflow::Status::~Status()` <a class="md-anchor" id="tensorflow_Status_Status"></a>
-#### tensorflow::Status::Status(tensorflow::error::Code code, tensorflow::StringPiece msg) <a class="md-anchor" id="tensorflow_Status_Status"></a>
+#### `tensorflow::Status::Status(tensorflow::error::Code code, tensorflow::StringPiece msg)` <a class="md-anchor" id="tensorflow_Status_Status"></a>
Create a status with the specified error code and msg as a human-readable string containing more detailed information.
-#### tensorflow::Status::Status(const Status &amp;s) <a class="md-anchor" id="tensorflow_Status_Status"></a>
+#### `tensorflow::Status::Status(const Status &s)` <a class="md-anchor" id="tensorflow_Status_Status"></a>
Copy the specified status.
-#### void tensorflow::Status::operator=(const Status &amp;s) <a class="md-anchor" id="void_tensorflow_Status_operator_"></a>
+#### `void tensorflow::Status::operator=(const Status &s)` <a class="md-anchor" id="void_tensorflow_Status_operator_"></a>
-#### bool tensorflow::Status::ok() const <a class="md-anchor" id="bool_tensorflow_Status_ok"></a>
+#### `bool tensorflow::Status::ok() const` <a class="md-anchor" id="bool_tensorflow_Status_ok"></a>
Returns true iff the status indicates success.
-#### tensorflow::error::Code tensorflow::Status::code() const <a class="md-anchor" id="tensorflow_error_Code_tensorflow_Status_code"></a>
+#### `tensorflow::error::Code tensorflow::Status::code() const` <a class="md-anchor" id="tensorflow_error_Code_tensorflow_Status_code"></a>
-#### const string&amp; tensorflow::Status::error_message() const <a class="md-anchor" id="const_string_amp_tensorflow_Status_error_message"></a>
+#### `const string& tensorflow::Status::error_message() const` <a class="md-anchor" id="const_string_tensorflow_Status_error_message"></a>
-#### bool tensorflow::Status::operator==(const Status &amp;x) const <a class="md-anchor" id="bool_tensorflow_Status_operator_"></a>
+#### `bool tensorflow::Status::operator==(const Status &x) const` <a class="md-anchor" id="bool_tensorflow_Status_operator_"></a>
-#### bool tensorflow::Status::operator!=(const Status &amp;x) const <a class="md-anchor" id="bool_tensorflow_Status_operator_"></a>
+#### `bool tensorflow::Status::operator!=(const Status &x) const` <a class="md-anchor" id="bool_tensorflow_Status_operator_"></a>
-#### void tensorflow::Status::Update(const Status &amp;new_status) <a class="md-anchor" id="void_tensorflow_Status_Update"></a>
+#### `void tensorflow::Status::Update(const Status &new_status)` <a class="md-anchor" id="void_tensorflow_Status_Update"></a>
-If &quot;ok()&quot;, stores &quot;new_status&quot; into *this. If &quot;!ok()&quot;, preserves the current status, but may augment with additional information about &quot;new_status&quot;.
+If "ok()", stores "new_status" into *this. If "!ok()", preserves the current status, but may augment with additional information about "new_status".
Convenient way of keeping track of the first error encountered. Instead of: if (overall_status.ok()) overall_status = new_status Use: overall_status.Update(new_status);
-#### string tensorflow::Status::ToString() const <a class="md-anchor" id="string_tensorflow_Status_ToString"></a>
+#### `string tensorflow::Status::ToString() const` <a class="md-anchor" id="string_tensorflow_Status_ToString"></a>
-Return a string representation of this status suitable for printing. Returns the string &quot;OK&quot; for success.
+Return a string representation of this status suitable for printing. Returns the string "OK" for success.
-#### static Status tensorflow::Status::OK() <a class="md-anchor" id="static_Status_tensorflow_Status_OK"></a>
+#### `static Status tensorflow::Status::OK()` <a class="md-anchor" id="static_Status_tensorflow_Status_OK"></a>
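The Update() idiom described above, keeping only the first error across a sequence of calls, reads roughly like this sketch (DeleteFile and the file list are purely illustrative).

```c++
#include <string>
#include <vector>

#include "tensorflow/core/public/env.h"

tensorflow::Status DeleteAll(tensorflow::Env* env,
                             const std::vector<std::string>& filenames) {
  tensorflow::Status overall_status;  // Default-constructed: success.
  for (const std::string& fname : filenames) {
    // Keeps the first error; later statuses may only augment the message.
    overall_status.Update(env->DeleteFile(fname));
  }
  return overall_status;
}
```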
diff --git a/tensorflow/g3doc/api_docs/cc/ClassTensor.md b/tensorflow/g3doc/api_docs/cc/ClassTensor.md
index 37d52bfacf..39f6b45124 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassTensor.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassTensor.md
@@ -1,4 +1,4 @@
-#Class tensorflow::Tensor <a class="md-anchor" id="AUTOGENERATED-class-tensorflow--tensor"></a>
+# Class `tensorflow::Tensor` <a class="md-anchor" id="AUTOGENERATED-class--tensorflow--tensor-"></a>
Represents an n-dimensional array of values.
@@ -6,175 +6,175 @@ Represents an n-dimensional array of values.
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [tensorflow::Tensor::Tensor](#tensorflow_Tensor_Tensor)
+* [`tensorflow::Tensor::Tensor()`](#tensorflow_Tensor_Tensor)
* Default Tensor constructor. Creates a 1-dimension, 0-element float tensor.
-* [tensorflow::Tensor::Tensor](#tensorflow_Tensor_Tensor)
+* [`tensorflow::Tensor::Tensor(DataType type, const TensorShape &shape)`](#tensorflow_Tensor_Tensor)
* Creates a Tensor of the given datatype and shape.
-* [tensorflow::Tensor::Tensor](#tensorflow_Tensor_Tensor)
+* [`tensorflow::Tensor::Tensor(Allocator *a, DataType type, const TensorShape &shape)`](#tensorflow_Tensor_Tensor)
* Creates a tensor with the input datatype and shape, using the allocator &apos;a&apos; to allocate the underlying buffer.
-* [tensorflow::Tensor::Tensor](#tensorflow_Tensor_Tensor)
+* [`tensorflow::Tensor::Tensor(DataType type)`](#tensorflow_Tensor_Tensor)
* Creates an uninitialized Tensor of the given data type.
-* [tensorflow::Tensor::Tensor](#tensorflow_Tensor_Tensor)
-* [tensorflow::Tensor::~Tensor](#tensorflow_Tensor_Tensor)
+* [`tensorflow::Tensor::Tensor(const Tensor &other)`](#tensorflow_Tensor_Tensor)
+* [`tensorflow::Tensor::~Tensor()`](#tensorflow_Tensor_Tensor)
* Copy constructor.
-* [DataType tensorflow::Tensor::dtype](#DataType_tensorflow_Tensor_dtype)
+* [`DataType tensorflow::Tensor::dtype() const`](#DataType_tensorflow_Tensor_dtype)
* Returns the data type.
-* [const TensorShape&amp; tensorflow::Tensor::shape](#const_TensorShape_amp_tensorflow_Tensor_shape)
+* [`const TensorShape& tensorflow::Tensor::shape() const`](#const_TensorShape_tensorflow_Tensor_shape)
* Returns the shape of the tensor.
-* [int tensorflow::Tensor::dims](#int_tensorflow_Tensor_dims)
+* [`int tensorflow::Tensor::dims() const`](#int_tensorflow_Tensor_dims)
* Convenience accessor for the tensor shape.
-* [int64 tensorflow::Tensor::dim_size](#int64_tensorflow_Tensor_dim_size)
+* [`int64 tensorflow::Tensor::dim_size(int d) const`](#int64_tensorflow_Tensor_dim_size)
* Convenience accessor for the tensor shape.
-* [int64 tensorflow::Tensor::NumElements](#int64_tensorflow_Tensor_NumElements)
+* [`int64 tensorflow::Tensor::NumElements() const`](#int64_tensorflow_Tensor_NumElements)
* Convenience accessor for the tensor shape.
-* [bool tensorflow::Tensor::IsSameSize](#bool_tensorflow_Tensor_IsSameSize)
-* [bool tensorflow::Tensor::IsInitialized](#bool_tensorflow_Tensor_IsInitialized)
+* [`bool tensorflow::Tensor::IsSameSize(const Tensor &b) const`](#bool_tensorflow_Tensor_IsSameSize)
+* [`bool tensorflow::Tensor::IsInitialized() const`](#bool_tensorflow_Tensor_IsInitialized)
* Has this Tensor been initialized?
-* [size_t tensorflow::Tensor::TotalBytes](#size_t_tensorflow_Tensor_TotalBytes)
+* [`size_t tensorflow::Tensor::TotalBytes() const`](#size_t_tensorflow_Tensor_TotalBytes)
* Returns the estimated memory usage of this tensor.
-* [Tensor&amp; tensorflow::Tensor::operator=](#Tensor_amp_tensorflow_Tensor_operator_)
+* [`Tensor& tensorflow::Tensor::operator=(const Tensor &other)`](#Tensor_tensorflow_Tensor_operator_)
* Assign operator. This tensor shares other&apos;s underlying storage.
-* [bool tensorflow::Tensor::CopyFrom](#bool_tensorflow_Tensor_CopyFrom)
+* [`bool tensorflow::Tensor::CopyFrom(const Tensor &other, const TensorShape &shape) TF_MUST_USE_RESULT`](#bool_tensorflow_Tensor_CopyFrom)
* Copy the other tensor into this tensor and reshape it.
-* [Tensor tensorflow::Tensor::Slice](#Tensor_tensorflow_Tensor_Slice)
+* [`Tensor tensorflow::Tensor::Slice(int64 dim0_start, int64 dim0_limit) const`](#Tensor_tensorflow_Tensor_Slice)
* Slice this tensor along the 1st dimension.
-* [bool tensorflow::Tensor::FromProto](#bool_tensorflow_Tensor_FromProto)
+* [`bool tensorflow::Tensor::FromProto(const TensorProto &other) TF_MUST_USE_RESULT`](#bool_tensorflow_Tensor_FromProto)
* Parse "other&apos; and construct the tensor.
-* [bool tensorflow::Tensor::FromProto](#bool_tensorflow_Tensor_FromProto)
-* [void tensorflow::Tensor::AsProtoField](#void_tensorflow_Tensor_AsProtoField)
- * Fills in &quot;proto&quot; with &quot;*this&quot; tensor&apos;s content.
-* [void tensorflow::Tensor::AsProtoTensorContent](#void_tensorflow_Tensor_AsProtoTensorContent)
-* [TTypes&lt;T&gt;::Vec tensorflow::Tensor::vec](#TTypes_lt_T_gt_Vec_tensorflow_Tensor_vec)
+* [`bool tensorflow::Tensor::FromProto(Allocator *a, const TensorProto &other) TF_MUST_USE_RESULT`](#bool_tensorflow_Tensor_FromProto)
+* [`void tensorflow::Tensor::AsProtoField(TensorProto *proto) const`](#void_tensorflow_Tensor_AsProtoField)
+ * Fills in "proto" with "*this" tensor&apos;s content.
+* [`void tensorflow::Tensor::AsProtoTensorContent(TensorProto *proto) const`](#void_tensorflow_Tensor_AsProtoTensorContent)
+* [`TTypes<T>::Vec tensorflow::Tensor::vec()`](#TTypes_T_Vec_tensorflow_Tensor_vec)
* Return the Tensor data as an Eigen::Tensor with the type and sizes of this Tensor .
-* [TTypes&lt;T&gt;::Matrix tensorflow::Tensor::matrix](#TTypes_lt_T_gt_Matrix_tensorflow_Tensor_matrix)
-* [TTypes&lt; T, NDIMS &gt;::Tensor tensorflow::Tensor::tensor](#TTypes_lt_T_NDIMS_gt_Tensor_tensorflow_Tensor_tensor)
-* [TTypes&lt;T&gt;::Flat tensorflow::Tensor::flat](#TTypes_lt_T_gt_Flat_tensorflow_Tensor_flat)
+* [`TTypes<T>::Matrix tensorflow::Tensor::matrix()`](#TTypes_T_Matrix_tensorflow_Tensor_matrix)
+* [`TTypes< T, NDIMS >::Tensor tensorflow::Tensor::tensor()`](#TTypes_T_NDIMS_Tensor_tensorflow_Tensor_tensor)
+* [`TTypes<T>::Flat tensorflow::Tensor::flat()`](#TTypes_T_Flat_tensorflow_Tensor_flat)
* Return the Tensor data as an Eigen::Tensor of the data type and a specified shape.
-* [TTypes&lt;T&gt;::UnalignedFlat tensorflow::Tensor::unaligned_flat](#TTypes_lt_T_gt_UnalignedFlat_tensorflow_Tensor_unaligned_flat)
-* [TTypes&lt;T&gt;::Matrix tensorflow::Tensor::flat_inner_dims](#TTypes_lt_T_gt_Matrix_tensorflow_Tensor_flat_inner_dims)
-* [TTypes&lt;T&gt;::Matrix tensorflow::Tensor::flat_outer_dims](#TTypes_lt_T_gt_Matrix_tensorflow_Tensor_flat_outer_dims)
-* [TTypes&lt; T, NDIMS &gt;::Tensor tensorflow::Tensor::shaped](#TTypes_lt_T_NDIMS_gt_Tensor_tensorflow_Tensor_shaped)
-* [TTypes&lt; T, NDIMS &gt;::UnalignedTensor tensorflow::Tensor::unaligned_shaped](#TTypes_lt_T_NDIMS_gt_UnalignedTensor_tensorflow_Tensor_unaligned_shaped)
-* [TTypes&lt; T &gt;::Scalar tensorflow::Tensor::scalar](#TTypes_lt_T_gt_Scalar_tensorflow_Tensor_scalar)
- * Return the Tensor data as a Tensor Map of fixed size 1: TensorMap&lt;TensorFixedSize&lt;T, 1&gt;&gt;.
-* [TTypes&lt;T&gt;::ConstVec tensorflow::Tensor::vec](#TTypes_lt_T_gt_ConstVec_tensorflow_Tensor_vec)
+* [`TTypes<T>::UnalignedFlat tensorflow::Tensor::unaligned_flat()`](#TTypes_T_UnalignedFlat_tensorflow_Tensor_unaligned_flat)
+* [`TTypes<T>::Matrix tensorflow::Tensor::flat_inner_dims()`](#TTypes_T_Matrix_tensorflow_Tensor_flat_inner_dims)
+* [`TTypes<T>::Matrix tensorflow::Tensor::flat_outer_dims()`](#TTypes_T_Matrix_tensorflow_Tensor_flat_outer_dims)
+* [`TTypes< T, NDIMS >::Tensor tensorflow::Tensor::shaped(gtl::ArraySlice< int64 > new_sizes)`](#TTypes_T_NDIMS_Tensor_tensorflow_Tensor_shaped)
+* [`TTypes< T, NDIMS >::UnalignedTensor tensorflow::Tensor::unaligned_shaped(gtl::ArraySlice< int64 > new_sizes)`](#TTypes_T_NDIMS_UnalignedTensor_tensorflow_Tensor_unaligned_shaped)
+* [`TTypes< T >::Scalar tensorflow::Tensor::scalar()`](#TTypes_T_Scalar_tensorflow_Tensor_scalar)
+ * Return the Tensor data as a Tensor Map of fixed size 1: TensorMap<TensorFixedSize<T, 1>>.
+* [`TTypes<T>::ConstVec tensorflow::Tensor::vec() const`](#TTypes_T_ConstVec_tensorflow_Tensor_vec)
* Const versions of all the methods above.
-* [TTypes&lt;T&gt;::ConstMatrix tensorflow::Tensor::matrix](#TTypes_lt_T_gt_ConstMatrix_tensorflow_Tensor_matrix)
-* [TTypes&lt; T, NDIMS &gt;::ConstTensor tensorflow::Tensor::tensor](#TTypes_lt_T_NDIMS_gt_ConstTensor_tensorflow_Tensor_tensor)
-* [TTypes&lt;T&gt;::ConstFlat tensorflow::Tensor::flat](#TTypes_lt_T_gt_ConstFlat_tensorflow_Tensor_flat)
-* [TTypes&lt;T&gt;::ConstUnalignedFlat tensorflow::Tensor::unaligned_flat](#TTypes_lt_T_gt_ConstUnalignedFlat_tensorflow_Tensor_unaligned_flat)
-* [TTypes&lt;T&gt;::ConstMatrix tensorflow::Tensor::flat_inner_dims](#TTypes_lt_T_gt_ConstMatrix_tensorflow_Tensor_flat_inner_dims)
-* [TTypes&lt;T&gt;::ConstMatrix tensorflow::Tensor::flat_outer_dims](#TTypes_lt_T_gt_ConstMatrix_tensorflow_Tensor_flat_outer_dims)
-* [TTypes&lt; T, NDIMS &gt;::ConstTensor tensorflow::Tensor::shaped](#TTypes_lt_T_NDIMS_gt_ConstTensor_tensorflow_Tensor_shaped)
-* [TTypes&lt; T, NDIMS &gt;::ConstUnalignedTensor tensorflow::Tensor::unaligned_shaped](#TTypes_lt_T_NDIMS_gt_ConstUnalignedTensor_tensorflow_Tensor_unaligned_shaped)
-* [TTypes&lt; T &gt;::ConstScalar tensorflow::Tensor::scalar](#TTypes_lt_T_gt_ConstScalar_tensorflow_Tensor_scalar)
-* [string tensorflow::Tensor::SummarizeValue](#string_tensorflow_Tensor_SummarizeValue)
+* [`TTypes<T>::ConstMatrix tensorflow::Tensor::matrix() const`](#TTypes_T_ConstMatrix_tensorflow_Tensor_matrix)
+* [`TTypes< T, NDIMS >::ConstTensor tensorflow::Tensor::tensor() const`](#TTypes_T_NDIMS_ConstTensor_tensorflow_Tensor_tensor)
+* [`TTypes<T>::ConstFlat tensorflow::Tensor::flat() const`](#TTypes_T_ConstFlat_tensorflow_Tensor_flat)
+* [`TTypes<T>::UnalignedConstFlat tensorflow::Tensor::unaligned_flat() const`](#TTypes_T_UnalignedConstFlat_tensorflow_Tensor_unaligned_flat)
+* [`TTypes<T>::ConstMatrix tensorflow::Tensor::flat_inner_dims() const`](#TTypes_T_ConstMatrix_tensorflow_Tensor_flat_inner_dims)
+* [`TTypes<T>::ConstMatrix tensorflow::Tensor::flat_outer_dims() const`](#TTypes_T_ConstMatrix_tensorflow_Tensor_flat_outer_dims)
+* [`TTypes< T, NDIMS >::ConstTensor tensorflow::Tensor::shaped(gtl::ArraySlice< int64 > new_sizes) const`](#TTypes_T_NDIMS_ConstTensor_tensorflow_Tensor_shaped)
+* [`TTypes< T, NDIMS >::UnalignedConstTensor tensorflow::Tensor::unaligned_shaped(gtl::ArraySlice< int64 > new_sizes) const`](#TTypes_T_NDIMS_UnalignedConstTensor_tensorflow_Tensor_unaligned_shaped)
+* [`TTypes< T >::ConstScalar tensorflow::Tensor::scalar() const`](#TTypes_T_ConstScalar_tensorflow_Tensor_scalar)
+* [`string tensorflow::Tensor::SummarizeValue(int64 max_entries) const`](#string_tensorflow_Tensor_SummarizeValue)
* Render the first max_entries values in *this into a string.
-* [string tensorflow::Tensor::DebugString](#string_tensorflow_Tensor_DebugString)
+* [`string tensorflow::Tensor::DebugString() const`](#string_tensorflow_Tensor_DebugString)
* A human-readable summary of the Tensor suitable for debugging.
-* [void tensorflow::Tensor::FillDescription](#void_tensorflow_Tensor_FillDescription)
-* [StringPiece tensorflow::Tensor::tensor_data](#StringPiece_tensorflow_Tensor_tensor_data)
+* [`void tensorflow::Tensor::FillDescription(TensorDescription *description) const`](#void_tensorflow_Tensor_FillDescription)
+* [`StringPiece tensorflow::Tensor::tensor_data() const`](#StringPiece_tensorflow_Tensor_tensor_data)
* Returns a StringPiece mapping the current tensor&apos;s buffer.
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### tensorflow::Tensor::Tensor() <a class="md-anchor" id="tensorflow_Tensor_Tensor"></a>
+#### `tensorflow::Tensor::Tensor()` <a class="md-anchor" id="tensorflow_Tensor_Tensor"></a>
Default Tensor constructor. Creates a 1-dimension, 0-element float tensor.
-#### tensorflow::Tensor::Tensor(DataType type, const TensorShape &amp;shape) <a class="md-anchor" id="tensorflow_Tensor_Tensor"></a>
+#### `tensorflow::Tensor::Tensor(DataType type, const TensorShape &shape)` <a class="md-anchor" id="tensorflow_Tensor_Tensor"></a>
Creates a Tensor of the given datatype and shape.
The underlying buffer is allocated using a CPUAllocator.
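For illustration, a minimal sketch of this constructor (assumes a `using namespace tensorflow` scope; the variable name is hypothetical):

```c++
// 3 x 5 float tensor; the buffer comes from the default CPUAllocator.
Tensor t(DT_FLOAT, TensorShape({3, 5}));
```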
-#### tensorflow::Tensor::Tensor(Allocator *a, DataType type, const TensorShape &amp;shape) <a class="md-anchor" id="tensorflow_Tensor_Tensor"></a>
+#### `tensorflow::Tensor::Tensor(Allocator *a, DataType type, const TensorShape &shape)` <a class="md-anchor" id="tensorflow_Tensor_Tensor"></a>
Creates a tensor with the input datatype and shape, using the allocator &apos;a&apos; to allocate the underlying buffer.
&apos;a&apos; must outlive the lifetime of this Tensor .
-#### tensorflow::Tensor::Tensor(DataType type) <a class="md-anchor" id="tensorflow_Tensor_Tensor"></a>
+#### `tensorflow::Tensor::Tensor(DataType type)` <a class="md-anchor" id="tensorflow_Tensor_Tensor"></a>
Creates an uninitialized Tensor of the given data type.
-#### tensorflow::Tensor::Tensor(const Tensor &amp;other) <a class="md-anchor" id="tensorflow_Tensor_Tensor"></a>
+#### `tensorflow::Tensor::Tensor(const Tensor &other)` <a class="md-anchor" id="tensorflow_Tensor_Tensor"></a>
-#### tensorflow::Tensor::~Tensor() <a class="md-anchor" id="tensorflow_Tensor_Tensor"></a>
+#### `tensorflow::Tensor::~Tensor()` <a class="md-anchor" id="tensorflow_Tensor_Tensor"></a>
Copy constructor.
-#### DataType tensorflow::Tensor::dtype() const <a class="md-anchor" id="DataType_tensorflow_Tensor_dtype"></a>
+#### `DataType tensorflow::Tensor::dtype() const` <a class="md-anchor" id="DataType_tensorflow_Tensor_dtype"></a>
Returns the data type.
-#### const TensorShape&amp; tensorflow::Tensor::shape() const <a class="md-anchor" id="const_TensorShape_amp_tensorflow_Tensor_shape"></a>
+#### `const TensorShape& tensorflow::Tensor::shape() const` <a class="md-anchor" id="const_TensorShape_tensorflow_Tensor_shape"></a>
Returns the shape of the tensor.
-#### int tensorflow::Tensor::dims() const <a class="md-anchor" id="int_tensorflow_Tensor_dims"></a>
+#### `int tensorflow::Tensor::dims() const` <a class="md-anchor" id="int_tensorflow_Tensor_dims"></a>
Convenience accessor for the tensor shape.
For all shape accessors, see comments for relevant methods of TensorShape in tensor_shape.h .
-#### int64 tensorflow::Tensor::dim_size(int d) const <a class="md-anchor" id="int64_tensorflow_Tensor_dim_size"></a>
+#### `int64 tensorflow::Tensor::dim_size(int d) const` <a class="md-anchor" id="int64_tensorflow_Tensor_dim_size"></a>
Convenience accessor for the tensor shape.
-#### int64 tensorflow::Tensor::NumElements() const <a class="md-anchor" id="int64_tensorflow_Tensor_NumElements"></a>
+#### `int64 tensorflow::Tensor::NumElements() const` <a class="md-anchor" id="int64_tensorflow_Tensor_NumElements"></a>
Convenience accessor for the tensor shape.
-#### bool tensorflow::Tensor::IsSameSize(const Tensor &amp;b) const <a class="md-anchor" id="bool_tensorflow_Tensor_IsSameSize"></a>
+#### `bool tensorflow::Tensor::IsSameSize(const Tensor &b) const` <a class="md-anchor" id="bool_tensorflow_Tensor_IsSameSize"></a>
-#### bool tensorflow::Tensor::IsInitialized() const <a class="md-anchor" id="bool_tensorflow_Tensor_IsInitialized"></a>
+#### `bool tensorflow::Tensor::IsInitialized() const` <a class="md-anchor" id="bool_tensorflow_Tensor_IsInitialized"></a>
Has this Tensor been initialized?
-#### size_t tensorflow::Tensor::TotalBytes() const <a class="md-anchor" id="size_t_tensorflow_Tensor_TotalBytes"></a>
+#### `size_t tensorflow::Tensor::TotalBytes() const` <a class="md-anchor" id="size_t_tensorflow_Tensor_TotalBytes"></a>
Returns the estimated memory usage of this tensor.
-#### Tensor&amp; tensorflow::Tensor::operator=(const Tensor &amp;other) <a class="md-anchor" id="Tensor_amp_tensorflow_Tensor_operator_"></a>
+#### `Tensor& tensorflow::Tensor::operator=(const Tensor &other)` <a class="md-anchor" id="Tensor_tensorflow_Tensor_operator_"></a>
Assign operator. This tensor shares other&apos;s underlying storage.
-#### bool tensorflow::Tensor::CopyFrom(const Tensor &amp;other, const TensorShape &amp;shape) TF_MUST_USE_RESULT <a class="md-anchor" id="bool_tensorflow_Tensor_CopyFrom"></a>
+#### `bool tensorflow::Tensor::CopyFrom(const Tensor &other, const TensorShape &shape) TF_MUST_USE_RESULT` <a class="md-anchor" id="bool_tensorflow_Tensor_CopyFrom"></a>
Copy the other tensor into this tensor and reshape it.
-This tensor shares other&apos;s underlying storage. Returns true iff other.shape() has the same number of elements of the given &quot;shape&quot;.
+This tensor shares other&apos;s underlying storage. Returns true iff other.shape() has the same number of elements as the given "shape".
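A sketch of the sharing/reshape behavior described above (illustrative names; assumes a `using namespace tensorflow` scope and the CHECK macro from the TensorFlow logging headers):

```c++
Tensor src(DT_FLOAT, TensorShape({2, 6}));
Tensor dst;
// Shares src's underlying storage; succeeds because both shapes hold 12 elements.
bool ok = dst.CopyFrom(src, TensorShape({3, 4}));
CHECK(ok);
```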
-#### Tensor tensorflow::Tensor::Slice(int64 dim0_start, int64 dim0_limit) const <a class="md-anchor" id="Tensor_tensorflow_Tensor_Slice"></a>
+#### `Tensor tensorflow::Tensor::Slice(int64 dim0_start, int64 dim0_limit) const` <a class="md-anchor" id="Tensor_tensorflow_Tensor_Slice"></a>
Slice this tensor along the 1st dimension.
@@ -182,175 +182,202 @@ I.e., the returned tensor satisfies returned[i, ...] == this[dim0_start + i, ..
NOTE: The returned tensor may not satisfy the same alignment requirement as this tensor depending on the shape. The caller must check the returned tensor&apos;s alignment before calling certain methods that have an alignment requirement (e.g., flat(), tensor()).
-REQUIRES: dims() &gt;= 1 REQUIRES: 0 &lt;= dim0_start &lt;= dim0_limit &lt;= dim_size(0)
+REQUIRES: dims() >= 1 REQUIRES: 0 <= dim0_start <= dim0_limit <= dim_size(0)
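As a sketch of the slicing contract above (illustrative names; assumes a `using namespace tensorflow` scope):

```c++
Tensor batch(DT_FLOAT, TensorShape({10, 28, 28}));
// Entries [2, 5) of dimension 0; the result has shape {3, 28, 28}.
Tensor sub = batch.Slice(2, 5);
// sub may not meet alignment requirements, so check before calling flat()/tensor().
```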
-#### bool tensorflow::Tensor::FromProto(const TensorProto &amp;other) TF_MUST_USE_RESULT <a class="md-anchor" id="bool_tensorflow_Tensor_FromProto"></a>
+#### `bool tensorflow::Tensor::FromProto(const TensorProto &other) TF_MUST_USE_RESULT` <a class="md-anchor" id="bool_tensorflow_Tensor_FromProto"></a>
Parse "other" and construct the tensor.
-Returns true iff the parsing succeeds. If the parsing fails, the state of &quot;*this&quot; is unchanged.
+Returns true iff the parsing succeeds. If the parsing fails, the state of "*this" is unchanged.
-#### bool tensorflow::Tensor::FromProto(Allocator *a, const TensorProto &amp;other) TF_MUST_USE_RESULT <a class="md-anchor" id="bool_tensorflow_Tensor_FromProto"></a>
+#### `bool tensorflow::Tensor::FromProto(Allocator *a, const TensorProto &other) TF_MUST_USE_RESULT` <a class="md-anchor" id="bool_tensorflow_Tensor_FromProto"></a>
-#### void tensorflow::Tensor::AsProtoField(TensorProto *proto) const <a class="md-anchor" id="void_tensorflow_Tensor_AsProtoField"></a>
+#### `void tensorflow::Tensor::AsProtoField(TensorProto *proto) const` <a class="md-anchor" id="void_tensorflow_Tensor_AsProtoField"></a>
-Fills in &quot;proto&quot; with &quot;*this&quot; tensor&apos;s content.
+Fills in "proto" with "*this" tensor&apos;s content.
AsProtoField() fills in the repeated field for proto.dtype(), while AsProtoTensorContent() encodes the content in proto.tensor_content() in a compact form.
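A sketch of a proto round trip using the methods above (illustrative only; assumes a `using namespace tensorflow` scope and the TensorProto message):

```c++
Tensor t(DT_FLOAT, TensorShape({2, 2}));
TensorProto proto;
t.AsProtoField(&proto);             // values go into the repeated field for the dtype
// t.AsProtoTensorContent(&proto);  // alternative: compact bytes in tensor_content()

Tensor restored;
if (!restored.FromProto(proto)) {
  // Parsing failed; restored is left unchanged.
}
```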
-#### void tensorflow::Tensor::AsProtoTensorContent(TensorProto *proto) const <a class="md-anchor" id="void_tensorflow_Tensor_AsProtoTensorContent"></a>
+#### `void tensorflow::Tensor::AsProtoTensorContent(TensorProto *proto) const` <a class="md-anchor" id="void_tensorflow_Tensor_AsProtoTensorContent"></a>
-#### TTypes&lt;T&gt;::Vec tensorflow::Tensor::vec() <a class="md-anchor" id="TTypes_lt_T_gt_Vec_tensorflow_Tensor_vec"></a>
+#### `TTypes<T>::Vec tensorflow::Tensor::vec()` <a class="md-anchor" id="TTypes_T_Vec_tensorflow_Tensor_vec"></a>
Return the Tensor data as an Eigen::Tensor with the type and sizes of this Tensor .
Use these methods when you know the data type and the number of dimensions of the Tensor and you want an Eigen::Tensor automatically sized to the Tensor sizes. The implementation check fails if either type or sizes mismatch.
-Example: typedef float T; Tensor my_mat(...built with Shape{rows: 3, cols: 5}...); auto mat = my_mat.matrix&lt;T&gt;(); // 2D Eigen::Tensor, 3 x 5. auto mat = my_mat.tensor&lt;T, 2&gt;(); // 2D Eigen::Tensor, 3 x 5. auto vec = my_mat.vec&lt;T&gt;(); // CHECK fails as my_mat is 2D. auto vec = my_mat.tensor&lt;T, 3&gt;(); // CHECK fails as my_mat is 2D. auto mat = my_mat.matrix&lt;int32&gt;();// CHECK fails as type mismatch.
+Example:
-#### TTypes&lt;T&gt;::Matrix tensorflow::Tensor::matrix() <a class="md-anchor" id="TTypes_lt_T_gt_Matrix_tensorflow_Tensor_matrix"></a>
+```c++
+typedef float T;
+Tensor my_mat(...built with Shape{rows: 3, cols: 5}...);
+auto mat = my_mat.matrix<T>(); // 2D Eigen::Tensor, 3 x 5.
+auto mat = my_mat.tensor<T, 2>(); // 2D Eigen::Tensor, 3 x 5.
+auto vec = my_mat.vec<T>(); // CHECK fails as my_mat is 2D.
+auto vec = my_mat.tensor<T, 3>(); // CHECK fails as my_mat is 2D.
+auto mat = my_mat.matrix<int32>();// CHECK fails as type mismatch.
+```
+#### `TTypes<T>::Matrix tensorflow::Tensor::matrix()` <a class="md-anchor" id="TTypes_T_Matrix_tensorflow_Tensor_matrix"></a>
-#### TTypes&lt; T, NDIMS &gt;::Tensor tensorflow::Tensor::tensor() <a class="md-anchor" id="TTypes_lt_T_NDIMS_gt_Tensor_tensorflow_Tensor_tensor"></a>
+#### `TTypes< T, NDIMS >::Tensor tensorflow::Tensor::tensor()` <a class="md-anchor" id="TTypes_T_NDIMS_Tensor_tensorflow_Tensor_tensor"></a>
-#### TTypes&lt;T&gt;::Flat tensorflow::Tensor::flat() <a class="md-anchor" id="TTypes_lt_T_gt_Flat_tensorflow_Tensor_flat"></a>
+
+
+#### `TTypes<T>::Flat tensorflow::Tensor::flat()` <a class="md-anchor" id="TTypes_T_Flat_tensorflow_Tensor_flat"></a>
Return the Tensor data as an Eigen::Tensor of the data type and a specified shape.
These methods allow you to access the data with the dimensions and sizes of your choice. You do not need to know the number of dimensions of the Tensor to call them. However, they CHECK that the type matches and that the dimensions requested create an Eigen::Tensor with the same number of elements as the Tensor .
-Example: typedef float T; Tensor my_ten(...built with Shape{planes: 4, rows: 3, cols: 5}...); // 1D Eigen::Tensor, size 60: auto flat = my_ten.flat&lt;T&gt;(); // 2D Eigen::Tensor 12 x 5: auto inner = my_ten.flat_inner_dims&lt;T&gt;(); // 2D Eigen::Tensor 4 x 15: auto outer = my_ten.shaped&lt;T, 2&gt;({4, 15}); // CHECK fails, bad num elements: auto outer = my_ten.shaped&lt;T, 2&gt;({4, 8}); // 3D Eigen::Tensor 6 x 5 x 2: auto weird = my_ten.shaped&lt;T, 3&gt;({6, 5, 2}); // CHECK fails, type mismatch: auto bad = my_ten.flat&lt;int32&gt;();
+Example:
+
+```c++
+typedef float T;
+Tensor my_ten(...built with Shape{planes: 4, rows: 3, cols: 5}...);
+// 1D Eigen::Tensor, size 60:
+auto flat = my_ten.flat<T>();
+// 2D Eigen::Tensor 12 x 5:
+auto inner = my_ten.flat_inner_dims<T>();
+// 2D Eigen::Tensor 4 x 15:
+auto outer = my_ten.shaped<T, 2>({4, 15});
+// CHECK fails, bad num elements:
+auto outer = my_ten.shaped<T, 2>({4, 8});
+// 3D Eigen::Tensor 6 x 5 x 2:
+auto weird = my_ten.shaped<T, 3>({6, 5, 2});
+// CHECK fails, type mismatch:
+auto bad = my_ten.flat<int32>();
+```
-#### TTypes&lt;T&gt;::UnalignedFlat tensorflow::Tensor::unaligned_flat() <a class="md-anchor" id="TTypes_lt_T_gt_UnalignedFlat_tensorflow_Tensor_unaligned_flat"></a>
+#### `TTypes<T>::UnalignedFlat tensorflow::Tensor::unaligned_flat()` <a class="md-anchor" id="TTypes_T_UnalignedFlat_tensorflow_Tensor_unaligned_flat"></a>
-#### TTypes&lt;T&gt;::Matrix tensorflow::Tensor::flat_inner_dims() <a class="md-anchor" id="TTypes_lt_T_gt_Matrix_tensorflow_Tensor_flat_inner_dims"></a>
+#### `TTypes<T>::Matrix tensorflow::Tensor::flat_inner_dims()` <a class="md-anchor" id="TTypes_T_Matrix_tensorflow_Tensor_flat_inner_dims"></a>
Returns the data as an Eigen::Tensor with 2 dimensions, collapsing all Tensor dimensions but the last one into the first dimension of the result.
-#### TTypes&lt;T&gt;::Matrix tensorflow::Tensor::flat_outer_dims() <a class="md-anchor" id="TTypes_lt_T_gt_Matrix_tensorflow_Tensor_flat_outer_dims"></a>
+#### `TTypes<T>::Matrix tensorflow::Tensor::flat_outer_dims()` <a class="md-anchor" id="TTypes_T_Matrix_tensorflow_Tensor_flat_outer_dims"></a>
Returns the data as an Eigen::Tensor with 2 dimensions, collapsing all Tensor dimensions but the first one into the last dimension of the result.
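A small sketch contrasting the two collapsing views (illustrative; assumes a `using namespace tensorflow` scope):

```c++
Tensor t(DT_FLOAT, TensorShape({4, 3, 5}));
auto inner = t.flat_inner_dims<float>();  // 2D Eigen::Tensor, 12 x 5
auto outer = t.flat_outer_dims<float>();  // 2D Eigen::Tensor, 4 x 15
```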
-#### TTypes&lt; T, NDIMS &gt;::Tensor tensorflow::Tensor::shaped(gtl::ArraySlice&lt; int64 &gt; new_sizes) <a class="md-anchor" id="TTypes_lt_T_NDIMS_gt_Tensor_tensorflow_Tensor_shaped"></a>
+#### `TTypes< T, NDIMS >::Tensor tensorflow::Tensor::shaped(gtl::ArraySlice< int64 > new_sizes)` <a class="md-anchor" id="TTypes_T_NDIMS_Tensor_tensorflow_Tensor_shaped"></a>
-#### TTypes&lt; T, NDIMS &gt;::UnalignedTensor tensorflow::Tensor::unaligned_shaped(gtl::ArraySlice&lt; int64 &gt; new_sizes) <a class="md-anchor" id="TTypes_lt_T_NDIMS_gt_UnalignedTensor_tensorflow_Tensor_unaligned_shaped"></a>
+#### `TTypes< T, NDIMS >::UnalignedTensor tensorflow::Tensor::unaligned_shaped(gtl::ArraySlice< int64 > new_sizes)` <a class="md-anchor" id="TTypes_T_NDIMS_UnalignedTensor_tensorflow_Tensor_unaligned_shaped"></a>
-#### TTypes&lt; T &gt;::Scalar tensorflow::Tensor::scalar() <a class="md-anchor" id="TTypes_lt_T_gt_Scalar_tensorflow_Tensor_scalar"></a>
+#### `TTypes< T >::Scalar tensorflow::Tensor::scalar()` <a class="md-anchor" id="TTypes_T_Scalar_tensorflow_Tensor_scalar"></a>
-Return the Tensor data as a Tensor Map of fixed size 1: TensorMap&lt;TensorFixedSize&lt;T, 1&gt;&gt;.
+Return the Tensor data as a Tensor Map of fixed size 1: TensorMap<TensorFixedSize<T, 1>>.
Using scalar() allows the compiler to perform optimizations as the size of the tensor is known at compile time.
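For example, a rank-0 tensor can be read and written through scalar() roughly as follows (a sketch; the variable name is hypothetical and a `using namespace tensorflow` scope is assumed):

```c++
Tensor loss(DT_FLOAT, TensorShape());  // zero dimensions, one element
loss.scalar<float>()() = 0.5f;         // write the single value
float v = loss.scalar<float>()();      // read it back
```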
-#### TTypes&lt;T&gt;::ConstVec tensorflow::Tensor::vec() const <a class="md-anchor" id="TTypes_lt_T_gt_ConstVec_tensorflow_Tensor_vec"></a>
+#### `TTypes<T>::ConstVec tensorflow::Tensor::vec() const` <a class="md-anchor" id="TTypes_T_ConstVec_tensorflow_Tensor_vec"></a>
Const versions of all the methods above.
-#### TTypes&lt;T&gt;::ConstMatrix tensorflow::Tensor::matrix() const <a class="md-anchor" id="TTypes_lt_T_gt_ConstMatrix_tensorflow_Tensor_matrix"></a>
+#### `TTypes<T>::ConstMatrix tensorflow::Tensor::matrix() const` <a class="md-anchor" id="TTypes_T_ConstMatrix_tensorflow_Tensor_matrix"></a>
-#### TTypes&lt; T, NDIMS &gt;::ConstTensor tensorflow::Tensor::tensor() const <a class="md-anchor" id="TTypes_lt_T_NDIMS_gt_ConstTensor_tensorflow_Tensor_tensor"></a>
+#### `TTypes< T, NDIMS >::ConstTensor tensorflow::Tensor::tensor() const` <a class="md-anchor" id="TTypes_T_NDIMS_ConstTensor_tensorflow_Tensor_tensor"></a>
-#### TTypes&lt;T&gt;::ConstFlat tensorflow::Tensor::flat() const <a class="md-anchor" id="TTypes_lt_T_gt_ConstFlat_tensorflow_Tensor_flat"></a>
+#### `TTypes<T>::ConstFlat tensorflow::Tensor::flat() const` <a class="md-anchor" id="TTypes_T_ConstFlat_tensorflow_Tensor_flat"></a>
-#### TTypes&lt;T&gt;::ConstUnalignedFlat tensorflow::Tensor::unaligned_flat() const <a class="md-anchor" id="TTypes_lt_T_gt_ConstUnalignedFlat_tensorflow_Tensor_unaligned_flat"></a>
+#### `TTypes<T>::UnalignedConstFlat tensorflow::Tensor::unaligned_flat() const` <a class="md-anchor" id="TTypes_T_UnalignedConstFlat_tensorflow_Tensor_unaligned_flat"></a>
-#### TTypes&lt;T&gt;::ConstMatrix tensorflow::Tensor::flat_inner_dims() const <a class="md-anchor" id="TTypes_lt_T_gt_ConstMatrix_tensorflow_Tensor_flat_inner_dims"></a>
+#### `TTypes<T>::ConstMatrix tensorflow::Tensor::flat_inner_dims() const` <a class="md-anchor" id="TTypes_T_ConstMatrix_tensorflow_Tensor_flat_inner_dims"></a>
-#### TTypes&lt;T&gt;::ConstMatrix tensorflow::Tensor::flat_outer_dims() const <a class="md-anchor" id="TTypes_lt_T_gt_ConstMatrix_tensorflow_Tensor_flat_outer_dims"></a>
+#### `TTypes<T>::ConstMatrix tensorflow::Tensor::flat_outer_dims() const` <a class="md-anchor" id="TTypes_T_ConstMatrix_tensorflow_Tensor_flat_outer_dims"></a>
-#### TTypes&lt; T, NDIMS &gt;::ConstTensor tensorflow::Tensor::shaped(gtl::ArraySlice&lt; int64 &gt; new_sizes) const <a class="md-anchor" id="TTypes_lt_T_NDIMS_gt_ConstTensor_tensorflow_Tensor_shaped"></a>
+#### `TTypes< T, NDIMS >::ConstTensor tensorflow::Tensor::shaped(gtl::ArraySlice< int64 > new_sizes) const` <a class="md-anchor" id="TTypes_T_NDIMS_ConstTensor_tensorflow_Tensor_shaped"></a>
-#### TTypes&lt; T, NDIMS &gt;::ConstUnalignedTensor tensorflow::Tensor::unaligned_shaped(gtl::ArraySlice&lt; int64 &gt; new_sizes) const <a class="md-anchor" id="TTypes_lt_T_NDIMS_gt_ConstUnalignedTensor_tensorflow_Tensor_unaligned_shaped"></a>
+#### `TTypes< T, NDIMS >::UnalignedConstTensor tensorflow::Tensor::unaligned_shaped(gtl::ArraySlice< int64 > new_sizes) const` <a class="md-anchor" id="TTypes_T_NDIMS_UnalignedConstTensor_tensorflow_Tensor_unaligned_shaped"></a>
-#### TTypes&lt; T &gt;::ConstScalar tensorflow::Tensor::scalar() const <a class="md-anchor" id="TTypes_lt_T_gt_ConstScalar_tensorflow_Tensor_scalar"></a>
+#### `TTypes< T >::ConstScalar tensorflow::Tensor::scalar() const` <a class="md-anchor" id="TTypes_T_ConstScalar_tensorflow_Tensor_scalar"></a>
-#### string tensorflow::Tensor::SummarizeValue(int64 max_entries) const <a class="md-anchor" id="string_tensorflow_Tensor_SummarizeValue"></a>
+#### `string tensorflow::Tensor::SummarizeValue(int64 max_entries) const` <a class="md-anchor" id="string_tensorflow_Tensor_SummarizeValue"></a>
Render the first max_entries values in *this into a string.
-#### string tensorflow::Tensor::DebugString() const <a class="md-anchor" id="string_tensorflow_Tensor_DebugString"></a>
+#### `string tensorflow::Tensor::DebugString() const` <a class="md-anchor" id="string_tensorflow_Tensor_DebugString"></a>
A human-readable summary of the Tensor suitable for debugging.
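A sketch of how the two rendering helpers might be used together (assumes the LOG macro from the TensorFlow logging headers; names are illustrative):

```c++
Tensor t(DT_FLOAT, TensorShape({2, 3}));
LOG(INFO) << t.DebugString();      // dtype, shape and a short preview of the values
LOG(INFO) << t.SummarizeValue(4);  // only the first 4 values
```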
-#### void tensorflow::Tensor::FillDescription(TensorDescription *description) const <a class="md-anchor" id="void_tensorflow_Tensor_FillDescription"></a>
+#### `void tensorflow::Tensor::FillDescription(TensorDescription *description) const` <a class="md-anchor" id="void_tensorflow_Tensor_FillDescription"></a>
Fill in the TensorDescription proto with metadata about the Tensor that is useful for monitoring and debugging.
-#### StringPiece tensorflow::Tensor::tensor_data() const <a class="md-anchor" id="StringPiece_tensorflow_Tensor_tensor_data"></a>
+#### `StringPiece tensorflow::Tensor::tensor_data() const` <a class="md-anchor" id="StringPiece_tensorflow_Tensor_tensor_data"></a>
Returns a StringPiece mapping the current tensor&apos;s buffer.
diff --git a/tensorflow/g3doc/api_docs/cc/ClassTensorBuffer.md b/tensorflow/g3doc/api_docs/cc/ClassTensorBuffer.md
index e6a76083dc..493d4da989 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassTensorBuffer.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassTensorBuffer.md
@@ -1,4 +1,4 @@
-#Class tensorflow::TensorBuffer <a class="md-anchor" id="AUTOGENERATED-class-tensorflow--tensorbuffer"></a>
+# Class `tensorflow::TensorBuffer` <a class="md-anchor" id="AUTOGENERATED-class--tensorflow--tensorbuffer-"></a>
@@ -6,46 +6,46 @@
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [tensorflow::TensorBuffer::~TensorBuffer](#tensorflow_TensorBuffer_TensorBuffer)
-* [virtual void* tensorflow::TensorBuffer::data](#virtual_void_tensorflow_TensorBuffer_data)
-* [virtual size_t tensorflow::TensorBuffer::size](#virtual_size_t_tensorflow_TensorBuffer_size)
-* [virtual TensorBuffer* tensorflow::TensorBuffer::root_buffer](#virtual_TensorBuffer_tensorflow_TensorBuffer_root_buffer)
-* [virtual void tensorflow::TensorBuffer::FillAllocationDescription](#virtual_void_tensorflow_TensorBuffer_FillAllocationDescription)
-* [T* tensorflow::TensorBuffer::base](#T_tensorflow_TensorBuffer_base)
+* [`tensorflow::TensorBuffer::~TensorBuffer() override`](#tensorflow_TensorBuffer_TensorBuffer)
+* [`virtual void* tensorflow::TensorBuffer::data() const =0`](#virtual_void_tensorflow_TensorBuffer_data)
+* [`virtual size_t tensorflow::TensorBuffer::size() const =0`](#virtual_size_t_tensorflow_TensorBuffer_size)
+* [`virtual TensorBuffer* tensorflow::TensorBuffer::root_buffer()=0`](#virtual_TensorBuffer_tensorflow_TensorBuffer_root_buffer)
+* [`virtual void tensorflow::TensorBuffer::FillAllocationDescription(AllocationDescription *proto) const =0`](#virtual_void_tensorflow_TensorBuffer_FillAllocationDescription)
+* [`T* tensorflow::TensorBuffer::base() const`](#T_tensorflow_TensorBuffer_base)
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### tensorflow::TensorBuffer::~TensorBuffer() override <a class="md-anchor" id="tensorflow_TensorBuffer_TensorBuffer"></a>
+#### `tensorflow::TensorBuffer::~TensorBuffer() override` <a class="md-anchor" id="tensorflow_TensorBuffer_TensorBuffer"></a>
-#### virtual void* tensorflow::TensorBuffer::data() const =0 <a class="md-anchor" id="virtual_void_tensorflow_TensorBuffer_data"></a>
+#### `virtual void* tensorflow::TensorBuffer::data() const =0` <a class="md-anchor" id="virtual_void_tensorflow_TensorBuffer_data"></a>
-#### virtual size_t tensorflow::TensorBuffer::size() const =0 <a class="md-anchor" id="virtual_size_t_tensorflow_TensorBuffer_size"></a>
+#### `virtual size_t tensorflow::TensorBuffer::size() const =0` <a class="md-anchor" id="virtual_size_t_tensorflow_TensorBuffer_size"></a>
-#### virtual TensorBuffer* tensorflow::TensorBuffer::root_buffer()=0 <a class="md-anchor" id="virtual_TensorBuffer_tensorflow_TensorBuffer_root_buffer"></a>
+#### `virtual TensorBuffer* tensorflow::TensorBuffer::root_buffer()=0` <a class="md-anchor" id="virtual_TensorBuffer_tensorflow_TensorBuffer_root_buffer"></a>
-#### virtual void tensorflow::TensorBuffer::FillAllocationDescription(AllocationDescription *proto) const =0 <a class="md-anchor" id="virtual_void_tensorflow_TensorBuffer_FillAllocationDescription"></a>
+#### `virtual void tensorflow::TensorBuffer::FillAllocationDescription(AllocationDescription *proto) const =0` <a class="md-anchor" id="virtual_void_tensorflow_TensorBuffer_FillAllocationDescription"></a>
-#### T* tensorflow::TensorBuffer::base() const <a class="md-anchor" id="T_tensorflow_TensorBuffer_base"></a>
+#### `T* tensorflow::TensorBuffer::base() const` <a class="md-anchor" id="T_tensorflow_TensorBuffer_base"></a>
diff --git a/tensorflow/g3doc/api_docs/cc/ClassTensorShape.md b/tensorflow/g3doc/api_docs/cc/ClassTensorShape.md
index c2318c0dac..f672d4164e 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassTensorShape.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassTensorShape.md
@@ -1,4 +1,4 @@
-#Class tensorflow::TensorShape <a class="md-anchor" id="AUTOGENERATED-class-tensorflow--tensorshape"></a>
+# Class `tensorflow::TensorShape` <a class="md-anchor" id="AUTOGENERATED-class--tensorflow--tensorshape-"></a>
Manages the dimensions of a Tensor and their sizes.
@@ -6,191 +6,191 @@ Manages the dimensions of a Tensor and their sizes.
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [tensorflow::TensorShape::TensorShape](#tensorflow_TensorShape_TensorShape)
- * Construct a TensorShape from the provided sizes.. REQUIRES: dim_sizes[i] &gt;= 0.
-* [tensorflow::TensorShape::TensorShape](#tensorflow_TensorShape_TensorShape)
-* [tensorflow::TensorShape::TensorShape](#tensorflow_TensorShape_TensorShape)
+* [`tensorflow::TensorShape::TensorShape(gtl::ArraySlice< int64 > dim_sizes)`](#tensorflow_TensorShape_TensorShape)
+ * Construct a TensorShape from the provided sizes. REQUIRES: dim_sizes[i] >= 0.
+* [`tensorflow::TensorShape::TensorShape(std::initializer_list< int64 > dim_sizes)`](#tensorflow_TensorShape_TensorShape)
+* [`tensorflow::TensorShape::TensorShape(const TensorShapeProto &proto)`](#tensorflow_TensorShape_TensorShape)
* REQUIRES: IsValid(proto)
-* [tensorflow::TensorShape::TensorShape](#tensorflow_TensorShape_TensorShape)
-* [void tensorflow::TensorShape::Clear](#void_tensorflow_TensorShape_Clear)
+* [`tensorflow::TensorShape::TensorShape()`](#tensorflow_TensorShape_TensorShape)
+* [`void tensorflow::TensorShape::Clear()`](#void_tensorflow_TensorShape_Clear)
* Clear a tensor shape.
-* [void tensorflow::TensorShape::AddDim](#void_tensorflow_TensorShape_AddDim)
- * Add a dimension to the end (&quot;inner-most&quot;). REQUIRES: size &gt;= 0.
-* [void tensorflow::TensorShape::AppendShape](#void_tensorflow_TensorShape_AppendShape)
+* [`void tensorflow::TensorShape::AddDim(int64 size)`](#void_tensorflow_TensorShape_AddDim)
+ * Add a dimension to the end ("inner-most"). REQUIRES: size >= 0.
+* [`void tensorflow::TensorShape::AppendShape(const TensorShape &shape)`](#void_tensorflow_TensorShape_AppendShape)
* Appends all the dimensions from shape.
-* [void tensorflow::TensorShape::InsertDim](#void_tensorflow_TensorShape_InsertDim)
- * Insert a dimension somewhere in the TensorShape . REQUIRES: &quot;0 &lt;= d &lt;= dims()&quot; REQUIRES: size &gt;= 0.
-* [void tensorflow::TensorShape::set_dim](#void_tensorflow_TensorShape_set_dim)
- * Modifies the size of the dimension &apos;d&apos; to be &apos;size&apos; REQUIRES: &quot;0 &lt;= d &lt; dims()&quot; REQUIRES: size &gt;= 0.
-* [void tensorflow::TensorShape::RemoveDim](#void_tensorflow_TensorShape_RemoveDim)
- * Removes dimension &apos;d&apos; from the TensorShape . REQUIRES: &quot;0 &lt;= d &lt; dims()&quot;.
-* [int tensorflow::TensorShape::dims](#int_tensorflow_TensorShape_dims)
+* [`void tensorflow::TensorShape::InsertDim(int d, int64 size)`](#void_tensorflow_TensorShape_InsertDim)
+ * Insert a dimension somewhere in the TensorShape . REQUIRES: "0 <= d <= dims()" REQUIRES: size >= 0.
+* [`void tensorflow::TensorShape::set_dim(int d, int64 size)`](#void_tensorflow_TensorShape_set_dim)
+ * Modifies the size of the dimension &apos;d&apos; to be &apos;size&apos; REQUIRES: "0 <= d < dims()" REQUIRES: size >= 0.
+* [`void tensorflow::TensorShape::RemoveDim(int d)`](#void_tensorflow_TensorShape_RemoveDim)
+ * Removes dimension &apos;d&apos; from the TensorShape . REQUIRES: "0 <= d < dims()".
+* [`int tensorflow::TensorShape::dims() const`](#int_tensorflow_TensorShape_dims)
* Return the number of dimensions in the tensor.
-* [int64 tensorflow::TensorShape::dim_size](#int64_tensorflow_TensorShape_dim_size)
- * Returns the number of elements in dimension &quot;d&quot;. REQUIRES: &quot;0 &lt;= d &lt; dims()&quot;.
-* [gtl::ArraySlice&lt;int64&gt; tensorflow::TensorShape::dim_sizes](#gtl_ArraySlice_lt_int64_gt_tensorflow_TensorShape_dim_sizes)
+* [`int64 tensorflow::TensorShape::dim_size(int d) const`](#int64_tensorflow_TensorShape_dim_size)
+ * Returns the number of elements in dimension "d". REQUIRES: "0 <= d < dims()".
+* [`gtl::ArraySlice<int64> tensorflow::TensorShape::dim_sizes() const`](#gtl_ArraySlice_int64_tensorflow_TensorShape_dim_sizes)
* Returns sizes of all dimensions.
-* [int64 tensorflow::TensorShape::num_elements](#int64_tensorflow_TensorShape_num_elements)
+* [`int64 tensorflow::TensorShape::num_elements() const`](#int64_tensorflow_TensorShape_num_elements)
* Returns the number of elements in the tensor.
-* [bool tensorflow::TensorShape::IsSameSize](#bool_tensorflow_TensorShape_IsSameSize)
+* [`bool tensorflow::TensorShape::IsSameSize(const TensorShape &b) const`](#bool_tensorflow_TensorShape_IsSameSize)
* Returns true if *this and b have the same sizes. Ignores dimension names.
-* [bool tensorflow::TensorShape::operator==](#bool_tensorflow_TensorShape_operator_)
-* [void tensorflow::TensorShape::AsProto](#void_tensorflow_TensorShape_AsProto)
+* [`bool tensorflow::TensorShape::operator==(const TensorShape &b) const`](#bool_tensorflow_TensorShape_operator_)
+* [`void tensorflow::TensorShape::AsProto(TensorShapeProto *proto) const`](#void_tensorflow_TensorShape_AsProto)
* Fill *proto from *this.
-* [Eigen::DSizes&lt; Eigen::DenseIndex, NDIMS &gt; tensorflow::TensorShape::AsEigenDSizes](#Eigen_DSizes_lt_Eigen_DenseIndex_NDIMS_gt_tensorflow_TensorShape_AsEigenDSizes)
+* [`Eigen::DSizes< Eigen::DenseIndex, NDIMS > tensorflow::TensorShape::AsEigenDSizes() const`](#Eigen_DSizes_Eigen_DenseIndex_NDIMS_tensorflow_TensorShape_AsEigenDSizes)
* Fill *dsizes from *this.
-* [Eigen::DSizes&lt; Eigen::DenseIndex, NDIMS &gt; tensorflow::TensorShape::AsEigenDSizesWithPadding](#Eigen_DSizes_lt_Eigen_DenseIndex_NDIMS_gt_tensorflow_TensorShape_AsEigenDSizesWithPadding)
-* [TensorShapeIter tensorflow::TensorShape::begin](#TensorShapeIter_tensorflow_TensorShape_begin)
+* [`Eigen::DSizes< Eigen::DenseIndex, NDIMS > tensorflow::TensorShape::AsEigenDSizesWithPadding() const`](#Eigen_DSizes_Eigen_DenseIndex_NDIMS_tensorflow_TensorShape_AsEigenDSizesWithPadding)
+* [`TensorShapeIter tensorflow::TensorShape::begin() const`](#TensorShapeIter_tensorflow_TensorShape_begin)
* For iterating through the dimensions.
-* [TensorShapeIter tensorflow::TensorShape::end](#TensorShapeIter_tensorflow_TensorShape_end)
-* [string tensorflow::TensorShape::DebugString](#string_tensorflow_TensorShape_DebugString)
+* [`TensorShapeIter tensorflow::TensorShape::end() const`](#TensorShapeIter_tensorflow_TensorShape_end)
+* [`string tensorflow::TensorShape::DebugString() const`](#string_tensorflow_TensorShape_DebugString)
* For error messages.
-* [string tensorflow::TensorShape::ShortDebugString](#string_tensorflow_TensorShape_ShortDebugString)
-* [static bool tensorflow::TensorShape::IsValid](#static_bool_tensorflow_TensorShape_IsValid)
- * Returns true iff &quot;proto&quot; is a valid tensor shape.
+* [`string tensorflow::TensorShape::ShortDebugString() const`](#string_tensorflow_TensorShape_ShortDebugString)
+* [`static bool tensorflow::TensorShape::IsValid(const TensorShapeProto &proto)`](#static_bool_tensorflow_TensorShape_IsValid)
+ * Returns true iff "proto" is a valid tensor shape.
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### tensorflow::TensorShape::TensorShape(gtl::ArraySlice&lt; int64 &gt; dim_sizes) <a class="md-anchor" id="tensorflow_TensorShape_TensorShape"></a>
+#### `tensorflow::TensorShape::TensorShape(gtl::ArraySlice< int64 > dim_sizes)` <a class="md-anchor" id="tensorflow_TensorShape_TensorShape"></a>
-Construct a TensorShape from the provided sizes.. REQUIRES: dim_sizes[i] &gt;= 0.
+Construct a TensorShape from the provided sizes. REQUIRES: dim_sizes[i] >= 0.
-#### tensorflow::TensorShape::TensorShape(std::initializer_list&lt; int64 &gt; dim_sizes) <a class="md-anchor" id="tensorflow_TensorShape_TensorShape"></a>
+#### `tensorflow::TensorShape::TensorShape(std::initializer_list< int64 > dim_sizes)` <a class="md-anchor" id="tensorflow_TensorShape_TensorShape"></a>
-#### tensorflow::TensorShape::TensorShape(const TensorShapeProto &amp;proto) <a class="md-anchor" id="tensorflow_TensorShape_TensorShape"></a>
+#### `tensorflow::TensorShape::TensorShape(const TensorShapeProto &proto)` <a class="md-anchor" id="tensorflow_TensorShape_TensorShape"></a>
REQUIRES: IsValid(proto)
-#### tensorflow::TensorShape::TensorShape() <a class="md-anchor" id="tensorflow_TensorShape_TensorShape"></a>
+#### `tensorflow::TensorShape::TensorShape()` <a class="md-anchor" id="tensorflow_TensorShape_TensorShape"></a>
Create a tensor shape with no dimensions and one element, which you can then call AddDim() on.
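A minimal sketch of building a shape incrementally with this constructor and AddDim() (assumes a `using namespace tensorflow` scope and the CHECK_EQ macro):

```c++
TensorShape shape;  // 0 dimensions, 1 element
shape.AddDim(3);    // {3}
shape.AddDim(5);    // {3, 5}
CHECK_EQ(15, shape.num_elements());
```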
-#### void tensorflow::TensorShape::Clear() <a class="md-anchor" id="void_tensorflow_TensorShape_Clear"></a>
+#### `void tensorflow::TensorShape::Clear()` <a class="md-anchor" id="void_tensorflow_TensorShape_Clear"></a>
Clear a tensor shape.
-#### void tensorflow::TensorShape::AddDim(int64 size) <a class="md-anchor" id="void_tensorflow_TensorShape_AddDim"></a>
+#### `void tensorflow::TensorShape::AddDim(int64 size)` <a class="md-anchor" id="void_tensorflow_TensorShape_AddDim"></a>
-Add a dimension to the end (&quot;inner-most&quot;). REQUIRES: size &gt;= 0.
+Add a dimension to the end ("inner-most"). REQUIRES: size >= 0.
-#### void tensorflow::TensorShape::AppendShape(const TensorShape &amp;shape) <a class="md-anchor" id="void_tensorflow_TensorShape_AppendShape"></a>
+#### `void tensorflow::TensorShape::AppendShape(const TensorShape &shape)` <a class="md-anchor" id="void_tensorflow_TensorShape_AppendShape"></a>
Appends all the dimensions from shape.
-#### void tensorflow::TensorShape::InsertDim(int d, int64 size) <a class="md-anchor" id="void_tensorflow_TensorShape_InsertDim"></a>
+#### `void tensorflow::TensorShape::InsertDim(int d, int64 size)` <a class="md-anchor" id="void_tensorflow_TensorShape_InsertDim"></a>
-Insert a dimension somewhere in the TensorShape . REQUIRES: &quot;0 &lt;= d &lt;= dims()&quot; REQUIRES: size &gt;= 0.
+Insert a dimension somewhere in the TensorShape . REQUIRES: "0 <= d <= dims()" REQUIRES: size >= 0.
-#### void tensorflow::TensorShape::set_dim(int d, int64 size) <a class="md-anchor" id="void_tensorflow_TensorShape_set_dim"></a>
+#### `void tensorflow::TensorShape::set_dim(int d, int64 size)` <a class="md-anchor" id="void_tensorflow_TensorShape_set_dim"></a>
-Modifies the size of the dimension &apos;d&apos; to be &apos;size&apos; REQUIRES: &quot;0 &lt;= d &lt; dims()&quot; REQUIRES: size &gt;= 0.
+Modifies the size of the dimension &apos;d&apos; to be &apos;size&apos; REQUIRES: "0 <= d < dims()" REQUIRES: size >= 0.
-#### void tensorflow::TensorShape::RemoveDim(int d) <a class="md-anchor" id="void_tensorflow_TensorShape_RemoveDim"></a>
+#### `void tensorflow::TensorShape::RemoveDim(int d)` <a class="md-anchor" id="void_tensorflow_TensorShape_RemoveDim"></a>
-Removes dimension &apos;d&apos; from the TensorShape . REQUIRES: &quot;0 &lt;= d &lt; dims()&quot;.
+Removes dimension &apos;d&apos; from the TensorShape . REQUIRES: "0 <= d < dims()".
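Taken together, the mutation methods above compose as in this sketch (illustrative values; assumes a `using namespace tensorflow` scope):

```c++
TensorShape s({2, 3});
s.InsertDim(0, 4);  // {4, 2, 3}
s.set_dim(2, 7);    // {4, 2, 7}
s.RemoveDim(1);     // {4, 7}
```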
-#### int tensorflow::TensorShape::dims() const <a class="md-anchor" id="int_tensorflow_TensorShape_dims"></a>
+#### `int tensorflow::TensorShape::dims() const` <a class="md-anchor" id="int_tensorflow_TensorShape_dims"></a>
Return the number of dimensions in the tensor.
-#### int64 tensorflow::TensorShape::dim_size(int d) const <a class="md-anchor" id="int64_tensorflow_TensorShape_dim_size"></a>
+#### `int64 tensorflow::TensorShape::dim_size(int d) const` <a class="md-anchor" id="int64_tensorflow_TensorShape_dim_size"></a>
-Returns the number of elements in dimension &quot;d&quot;. REQUIRES: &quot;0 &lt;= d &lt; dims()&quot;.
+Returns the number of elements in dimension "d". REQUIRES: "0 <= d < dims()".
-#### gtl::ArraySlice&lt;int64&gt; tensorflow::TensorShape::dim_sizes() const <a class="md-anchor" id="gtl_ArraySlice_lt_int64_gt_tensorflow_TensorShape_dim_sizes"></a>
+#### `gtl::ArraySlice<int64> tensorflow::TensorShape::dim_sizes() const` <a class="md-anchor" id="gtl_ArraySlice_int64_tensorflow_TensorShape_dim_sizes"></a>
Returns sizes of all dimensions.
-#### int64 tensorflow::TensorShape::num_elements() const <a class="md-anchor" id="int64_tensorflow_TensorShape_num_elements"></a>
+#### `int64 tensorflow::TensorShape::num_elements() const` <a class="md-anchor" id="int64_tensorflow_TensorShape_num_elements"></a>
Returns the number of elements in the tensor.
We use int64 and not size_t to be compatible with Eigen::Tensor which uses ptrdiff_t.
-#### bool tensorflow::TensorShape::IsSameSize(const TensorShape &amp;b) const <a class="md-anchor" id="bool_tensorflow_TensorShape_IsSameSize"></a>
+#### `bool tensorflow::TensorShape::IsSameSize(const TensorShape &b) const` <a class="md-anchor" id="bool_tensorflow_TensorShape_IsSameSize"></a>
Returns true if *this and b have the same sizes. Ignores dimension names.
-#### bool tensorflow::TensorShape::operator==(const TensorShape &amp;b) const <a class="md-anchor" id="bool_tensorflow_TensorShape_operator_"></a>
+#### `bool tensorflow::TensorShape::operator==(const TensorShape &b) const` <a class="md-anchor" id="bool_tensorflow_TensorShape_operator_"></a>
-#### void tensorflow::TensorShape::AsProto(TensorShapeProto *proto) const <a class="md-anchor" id="void_tensorflow_TensorShape_AsProto"></a>
+#### `void tensorflow::TensorShape::AsProto(TensorShapeProto *proto) const` <a class="md-anchor" id="void_tensorflow_TensorShape_AsProto"></a>
Fill *proto from *this.
-#### Eigen::DSizes&lt; Eigen::DenseIndex, NDIMS &gt; tensorflow::TensorShape::AsEigenDSizes() const <a class="md-anchor" id="Eigen_DSizes_lt_Eigen_DenseIndex_NDIMS_gt_tensorflow_TensorShape_AsEigenDSizes"></a>
+#### `Eigen::DSizes< Eigen::DenseIndex, NDIMS > tensorflow::TensorShape::AsEigenDSizes() const` <a class="md-anchor" id="Eigen_DSizes_Eigen_DenseIndex_NDIMS_tensorflow_TensorShape_AsEigenDSizes"></a>
Fill *dsizes from *this.
-#### Eigen::DSizes&lt; Eigen::DenseIndex, NDIMS &gt; tensorflow::TensorShape::AsEigenDSizesWithPadding() const <a class="md-anchor" id="Eigen_DSizes_lt_Eigen_DenseIndex_NDIMS_gt_tensorflow_TensorShape_AsEigenDSizesWithPadding"></a>
+#### `Eigen::DSizes< Eigen::DenseIndex, NDIMS > tensorflow::TensorShape::AsEigenDSizesWithPadding() const` <a class="md-anchor" id="Eigen_DSizes_Eigen_DenseIndex_NDIMS_tensorflow_TensorShape_AsEigenDSizesWithPadding"></a>
-Same as AsEigenDSizes() but allows for NDIMS &gt; dims() in which case we pad the rest of the sizes with 1.
+Same as AsEigenDSizes() but allows for NDIMS > dims() in which case we pad the rest of the sizes with 1.
-#### TensorShapeIter tensorflow::TensorShape::begin() const <a class="md-anchor" id="TensorShapeIter_tensorflow_TensorShape_begin"></a>
+#### `TensorShapeIter tensorflow::TensorShape::begin() const` <a class="md-anchor" id="TensorShapeIter_tensorflow_TensorShape_begin"></a>
For iterating through the dimensions.
-#### TensorShapeIter tensorflow::TensorShape::end() const <a class="md-anchor" id="TensorShapeIter_tensorflow_TensorShape_end"></a>
+#### `TensorShapeIter tensorflow::TensorShape::end() const` <a class="md-anchor" id="TensorShapeIter_tensorflow_TensorShape_end"></a>
-#### string tensorflow::TensorShape::DebugString() const <a class="md-anchor" id="string_tensorflow_TensorShape_DebugString"></a>
+#### `string tensorflow::TensorShape::DebugString() const` <a class="md-anchor" id="string_tensorflow_TensorShape_DebugString"></a>
For error messages.
-#### string tensorflow::TensorShape::ShortDebugString() const <a class="md-anchor" id="string_tensorflow_TensorShape_ShortDebugString"></a>
+#### `string tensorflow::TensorShape::ShortDebugString() const` <a class="md-anchor" id="string_tensorflow_TensorShape_ShortDebugString"></a>
-#### static bool tensorflow::TensorShape::IsValid(const TensorShapeProto &amp;proto) <a class="md-anchor" id="static_bool_tensorflow_TensorShape_IsValid"></a>
+#### `static bool tensorflow::TensorShape::IsValid(const TensorShapeProto &proto)` <a class="md-anchor" id="static_bool_tensorflow_TensorShape_IsValid"></a>
-Returns true iff &quot;proto&quot; is a valid tensor shape.
+Returns true iff "proto" is a valid tensor shape.
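A sketch of a shape/proto round trip using AsProto(), IsValid() and the proto constructor (illustrative only; assumes a `using namespace tensorflow` scope and the TensorShapeProto message):

```c++
TensorShape shape({2, 3});
TensorShapeProto proto;
shape.AsProto(&proto);
if (TensorShape::IsValid(proto)) {
  TensorShape rebuilt(proto);  // REQUIRES: IsValid(proto)
}
```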
diff --git a/tensorflow/g3doc/api_docs/cc/ClassTensorShapeIter.md b/tensorflow/g3doc/api_docs/cc/ClassTensorShapeIter.md
index 4789df2e0e..ada68f238a 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassTensorShapeIter.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassTensorShapeIter.md
@@ -1,4 +1,4 @@
-#Class tensorflow::TensorShapeIter <a class="md-anchor" id="AUTOGENERATED-class-tensorflow--tensorshapeiter"></a>
+# Class `tensorflow::TensorShapeIter` <a class="md-anchor" id="AUTOGENERATED-class--tensorflow--tensorshapeiter-"></a>
@@ -6,39 +6,39 @@
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [tensorflow::TensorShapeIter::TensorShapeIter](#tensorflow_TensorShapeIter_TensorShapeIter)
-* [bool tensorflow::TensorShapeIter::operator==](#bool_tensorflow_TensorShapeIter_operator_)
-* [bool tensorflow::TensorShapeIter::operator!=](#bool_tensorflow_TensorShapeIter_operator_)
-* [void tensorflow::TensorShapeIter::operator++](#void_tensorflow_TensorShapeIter_operator_)
-* [TensorShapeDim tensorflow::TensorShapeIter::operator*](#TensorShapeDim_tensorflow_TensorShapeIter_operator_)
+* [`tensorflow::TensorShapeIter::TensorShapeIter(const TensorShape *shape, int d)`](#tensorflow_TensorShapeIter_TensorShapeIter)
+* [`bool tensorflow::TensorShapeIter::operator==(const TensorShapeIter &rhs)`](#bool_tensorflow_TensorShapeIter_operator_)
+* [`bool tensorflow::TensorShapeIter::operator!=(const TensorShapeIter &rhs)`](#bool_tensorflow_TensorShapeIter_operator_)
+* [`void tensorflow::TensorShapeIter::operator++()`](#void_tensorflow_TensorShapeIter_operator_)
+* [`TensorShapeDim tensorflow::TensorShapeIter::operator*()`](#TensorShapeDim_tensorflow_TensorShapeIter_operator_)
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### tensorflow::TensorShapeIter::TensorShapeIter(const TensorShape *shape, int d) <a class="md-anchor" id="tensorflow_TensorShapeIter_TensorShapeIter"></a>
+#### `tensorflow::TensorShapeIter::TensorShapeIter(const TensorShape *shape, int d)` <a class="md-anchor" id="tensorflow_TensorShapeIter_TensorShapeIter"></a>
-#### bool tensorflow::TensorShapeIter::operator==(const TensorShapeIter &amp;rhs) <a class="md-anchor" id="bool_tensorflow_TensorShapeIter_operator_"></a>
+#### `bool tensorflow::TensorShapeIter::operator==(const TensorShapeIter &rhs)` <a class="md-anchor" id="bool_tensorflow_TensorShapeIter_operator_"></a>
-#### bool tensorflow::TensorShapeIter::operator!=(const TensorShapeIter &amp;rhs) <a class="md-anchor" id="bool_tensorflow_TensorShapeIter_operator_"></a>
+#### `bool tensorflow::TensorShapeIter::operator!=(const TensorShapeIter &rhs)` <a class="md-anchor" id="bool_tensorflow_TensorShapeIter_operator_"></a>
-#### void tensorflow::TensorShapeIter::operator++() <a class="md-anchor" id="void_tensorflow_TensorShapeIter_operator_"></a>
+#### `void tensorflow::TensorShapeIter::operator++()` <a class="md-anchor" id="void_tensorflow_TensorShapeIter_operator_"></a>
-#### TensorShapeDim tensorflow::TensorShapeIter::operator*() <a class="md-anchor" id="TensorShapeDim_tensorflow_TensorShapeIter_operator_"></a>
+#### `TensorShapeDim tensorflow::TensorShapeIter::operator*()` <a class="md-anchor" id="TensorShapeDim_tensorflow_TensorShapeIter_operator_"></a>
diff --git a/tensorflow/g3doc/api_docs/cc/ClassTensorShapeUtils.md b/tensorflow/g3doc/api_docs/cc/ClassTensorShapeUtils.md
index 2221ebdd91..53348cd57d 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassTensorShapeUtils.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassTensorShapeUtils.md
@@ -1,4 +1,4 @@
-#Class tensorflow::TensorShapeUtils <a class="md-anchor" id="AUTOGENERATED-class-tensorflow--tensorshapeutils"></a>
+# Class `tensorflow::TensorShapeUtils` <a class="md-anchor" id="AUTOGENERATED-class--tensorflow--tensorshapeutils-"></a>
Static helper routines for TensorShape . Includes a few common predicates on a tensor shape.
@@ -6,75 +6,75 @@ Static helper routines for TensorShape . Includes a few common predicates on a tensor shape.
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [static bool tensorflow::TensorShapeUtils::IsScalar](#static_bool_tensorflow_TensorShapeUtils_IsScalar)
-* [static bool tensorflow::TensorShapeUtils::IsVector](#static_bool_tensorflow_TensorShapeUtils_IsVector)
-* [static bool tensorflow::TensorShapeUtils::IsLegacyScalar](#static_bool_tensorflow_TensorShapeUtils_IsLegacyScalar)
-* [static bool tensorflow::TensorShapeUtils::IsLegacyVector](#static_bool_tensorflow_TensorShapeUtils_IsLegacyVector)
-* [static bool tensorflow::TensorShapeUtils::IsVectorOrHigher](#static_bool_tensorflow_TensorShapeUtils_IsVectorOrHigher)
-* [static bool tensorflow::TensorShapeUtils::IsMatrix](#static_bool_tensorflow_TensorShapeUtils_IsMatrix)
-* [static bool tensorflow::TensorShapeUtils::IsMatrixOrHigher](#static_bool_tensorflow_TensorShapeUtils_IsMatrixOrHigher)
-* [static TensorShape tensorflow::TensorShapeUtils::MakeShape](#static_TensorShape_tensorflow_TensorShapeUtils_MakeShape)
+* [`static bool tensorflow::TensorShapeUtils::IsScalar(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsScalar)
+* [`static bool tensorflow::TensorShapeUtils::IsVector(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsVector)
+* [`static bool tensorflow::TensorShapeUtils::IsLegacyScalar(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsLegacyScalar)
+* [`static bool tensorflow::TensorShapeUtils::IsLegacyVector(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsLegacyVector)
+* [`static bool tensorflow::TensorShapeUtils::IsVectorOrHigher(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsVectorOrHigher)
+* [`static bool tensorflow::TensorShapeUtils::IsMatrix(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsMatrix)
+* [`static bool tensorflow::TensorShapeUtils::IsMatrixOrHigher(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsMatrixOrHigher)
+* [`static TensorShape tensorflow::TensorShapeUtils::MakeShape(const T *dims, int n)`](#static_TensorShape_tensorflow_TensorShapeUtils_MakeShape)
* Returns a TensorShape whose dimensions are dims[0], dims[1], ..., dims[n-1].
-* [static string tensorflow::TensorShapeUtils::ShapeListString](#static_string_tensorflow_TensorShapeUtils_ShapeListString)
-* [static bool tensorflow::TensorShapeUtils::StartsWith](#static_bool_tensorflow_TensorShapeUtils_StartsWith)
+* [`static string tensorflow::TensorShapeUtils::ShapeListString(const gtl::ArraySlice< TensorShape > &shapes)`](#static_string_tensorflow_TensorShapeUtils_ShapeListString)
+* [`static bool tensorflow::TensorShapeUtils::StartsWith(const TensorShape &shape0, const TensorShape &shape1)`](#static_bool_tensorflow_TensorShapeUtils_StartsWith)
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### static bool tensorflow::TensorShapeUtils::IsScalar(const TensorShape &amp;shape) <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsScalar"></a>
+#### `static bool tensorflow::TensorShapeUtils::IsScalar(const TensorShape &shape)` <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsScalar"></a>
-#### static bool tensorflow::TensorShapeUtils::IsVector(const TensorShape &amp;shape) <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsVector"></a>
+#### `static bool tensorflow::TensorShapeUtils::IsVector(const TensorShape &shape)` <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsVector"></a>
-#### static bool tensorflow::TensorShapeUtils::IsLegacyScalar(const TensorShape &amp;shape) <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsLegacyScalar"></a>
+#### `static bool tensorflow::TensorShapeUtils::IsLegacyScalar(const TensorShape &shape)` <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsLegacyScalar"></a>
-#### static bool tensorflow::TensorShapeUtils::IsLegacyVector(const TensorShape &amp;shape) <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsLegacyVector"></a>
+#### `static bool tensorflow::TensorShapeUtils::IsLegacyVector(const TensorShape &shape)` <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsLegacyVector"></a>
-#### static bool tensorflow::TensorShapeUtils::IsVectorOrHigher(const TensorShape &amp;shape) <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsVectorOrHigher"></a>
+#### `static bool tensorflow::TensorShapeUtils::IsVectorOrHigher(const TensorShape &shape)` <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsVectorOrHigher"></a>
-#### static bool tensorflow::TensorShapeUtils::IsMatrix(const TensorShape &amp;shape) <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsMatrix"></a>
+#### `static bool tensorflow::TensorShapeUtils::IsMatrix(const TensorShape &shape)` <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsMatrix"></a>
-#### static bool tensorflow::TensorShapeUtils::IsMatrixOrHigher(const TensorShape &amp;shape) <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsMatrixOrHigher"></a>
+#### `static bool tensorflow::TensorShapeUtils::IsMatrixOrHigher(const TensorShape &shape)` <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_IsMatrixOrHigher"></a>
-#### static TensorShape tensorflow::TensorShapeUtils::MakeShape(const T *dims, int n) <a class="md-anchor" id="static_TensorShape_tensorflow_TensorShapeUtils_MakeShape"></a>
+#### `static TensorShape tensorflow::TensorShapeUtils::MakeShape(const T *dims, int n)` <a class="md-anchor" id="static_TensorShape_tensorflow_TensorShapeUtils_MakeShape"></a>
Returns a TensorShape whose dimensions are dims[0], dims[1], ..., dims[n-1].
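For example, MakeShape() and the predicates above might be combined as follows (a sketch; assumes a `using namespace tensorflow` scope and the CHECK macro):

```c++
const int64 dims[] = {2, 3};
TensorShape s = TensorShapeUtils::MakeShape(dims, 2);  // {2, 3}
CHECK(TensorShapeUtils::IsMatrix(s));
CHECK(!TensorShapeUtils::IsScalar(s));
```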
-#### static string tensorflow::TensorShapeUtils::ShapeListString(const gtl::ArraySlice&lt; TensorShape &gt; &amp;shapes) <a class="md-anchor" id="static_string_tensorflow_TensorShapeUtils_ShapeListString"></a>
+#### `static string tensorflow::TensorShapeUtils::ShapeListString(const gtl::ArraySlice< TensorShape > &shapes)` <a class="md-anchor" id="static_string_tensorflow_TensorShapeUtils_ShapeListString"></a>
-#### static bool tensorflow::TensorShapeUtils::StartsWith(const TensorShape &amp;shape0, const TensorShape &amp;shape1) <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_StartsWith"></a>
+#### `static bool tensorflow::TensorShapeUtils::StartsWith(const TensorShape &shape0, const TensorShape &shape1)` <a class="md-anchor" id="static_bool_tensorflow_TensorShapeUtils_StartsWith"></a>
diff --git a/tensorflow/g3doc/api_docs/cc/ClassThread.md b/tensorflow/g3doc/api_docs/cc/ClassThread.md
index 9ae21780df..118f1eab53 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassThread.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassThread.md
@@ -1,4 +1,4 @@
-#Class tensorflow::Thread <a class="md-anchor" id="AUTOGENERATED-class-tensorflow--thread"></a>
+# Class `tensorflow::Thread` <a class="md-anchor" id="AUTOGENERATED-class--tensorflow--thread-"></a>
@@ -6,19 +6,19 @@
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [tensorflow::Thread::Thread](#tensorflow_Thread_Thread)
-* [virtual tensorflow::Thread::~Thread](#virtual_tensorflow_Thread_Thread)
+* [`tensorflow::Thread::Thread()`](#tensorflow_Thread_Thread)
+* [`virtual tensorflow::Thread::~Thread()`](#virtual_tensorflow_Thread_Thread)
* Blocks until the thread of control stops running.
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### tensorflow::Thread::Thread() <a class="md-anchor" id="tensorflow_Thread_Thread"></a>
+#### `tensorflow::Thread::Thread()` <a class="md-anchor" id="tensorflow_Thread_Thread"></a>
-#### virtual tensorflow::Thread::~Thread() <a class="md-anchor" id="virtual_tensorflow_Thread_Thread"></a>
+#### `virtual tensorflow::Thread::~Thread()` <a class="md-anchor" id="virtual_tensorflow_Thread_Thread"></a>
Blocks until the thread of control stops running.
diff --git a/tensorflow/g3doc/api_docs/cc/ClassWritableFile.md b/tensorflow/g3doc/api_docs/cc/ClassWritableFile.md
index b9923cfe56..075632f91c 100644
--- a/tensorflow/g3doc/api_docs/cc/ClassWritableFile.md
+++ b/tensorflow/g3doc/api_docs/cc/ClassWritableFile.md
@@ -1,4 +1,4 @@
-#Class tensorflow::WritableFile <a class="md-anchor" id="AUTOGENERATED-class-tensorflow--writablefile"></a>
+# Class `tensorflow::WritableFile` <a class="md-anchor" id="AUTOGENERATED-class--tensorflow--writablefile-"></a>
A file abstraction for sequential writing.
@@ -6,46 +6,46 @@ The implementation must provide buffering since callers may append small fragments.
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [tensorflow::WritableFile::WritableFile](#tensorflow_WritableFile_WritableFile)
-* [virtual tensorflow::WritableFile::~WritableFile](#virtual_tensorflow_WritableFile_WritableFile)
-* [virtual Status tensorflow::WritableFile::Append](#virtual_Status_tensorflow_WritableFile_Append)
-* [virtual Status tensorflow::WritableFile::Close](#virtual_Status_tensorflow_WritableFile_Close)
-* [virtual Status tensorflow::WritableFile::Flush](#virtual_Status_tensorflow_WritableFile_Flush)
-* [virtual Status tensorflow::WritableFile::Sync](#virtual_Status_tensorflow_WritableFile_Sync)
+* [`tensorflow::WritableFile::WritableFile()`](#tensorflow_WritableFile_WritableFile)
+* [`virtual tensorflow::WritableFile::~WritableFile()`](#virtual_tensorflow_WritableFile_WritableFile)
+* [`virtual Status tensorflow::WritableFile::Append(const StringPiece &data)=0`](#virtual_Status_tensorflow_WritableFile_Append)
+* [`virtual Status tensorflow::WritableFile::Close()=0`](#virtual_Status_tensorflow_WritableFile_Close)
+* [`virtual Status tensorflow::WritableFile::Flush()=0`](#virtual_Status_tensorflow_WritableFile_Flush)
+* [`virtual Status tensorflow::WritableFile::Sync()=0`](#virtual_Status_tensorflow_WritableFile_Sync)
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### tensorflow::WritableFile::WritableFile() <a class="md-anchor" id="tensorflow_WritableFile_WritableFile"></a>
+#### `tensorflow::WritableFile::WritableFile()` <a class="md-anchor" id="tensorflow_WritableFile_WritableFile"></a>
-#### virtual tensorflow::WritableFile::~WritableFile() <a class="md-anchor" id="virtual_tensorflow_WritableFile_WritableFile"></a>
+#### `virtual tensorflow::WritableFile::~WritableFile()` <a class="md-anchor" id="virtual_tensorflow_WritableFile_WritableFile"></a>
-#### virtual Status tensorflow::WritableFile::Append(const StringPiece &amp;data)=0 <a class="md-anchor" id="virtual_Status_tensorflow_WritableFile_Append"></a>
+#### `virtual Status tensorflow::WritableFile::Append(const StringPiece &data)=0` <a class="md-anchor" id="virtual_Status_tensorflow_WritableFile_Append"></a>
-#### virtual Status tensorflow::WritableFile::Close()=0 <a class="md-anchor" id="virtual_Status_tensorflow_WritableFile_Close"></a>
+#### `virtual Status tensorflow::WritableFile::Close()=0` <a class="md-anchor" id="virtual_Status_tensorflow_WritableFile_Close"></a>
-#### virtual Status tensorflow::WritableFile::Flush()=0 <a class="md-anchor" id="virtual_Status_tensorflow_WritableFile_Flush"></a>
+#### `virtual Status tensorflow::WritableFile::Flush()=0` <a class="md-anchor" id="virtual_Status_tensorflow_WritableFile_Flush"></a>
-#### virtual Status tensorflow::WritableFile::Sync()=0 <a class="md-anchor" id="virtual_Status_tensorflow_WritableFile_Sync"></a>
+#### `virtual Status tensorflow::WritableFile::Sync()=0` <a class="md-anchor" id="virtual_Status_tensorflow_WritableFile_Sync"></a>
diff --git a/tensorflow/g3doc/api_docs/cc/StructSessionOptions.md b/tensorflow/g3doc/api_docs/cc/StructSessionOptions.md
index 12f4ed9101..0643d917fc 100644
--- a/tensorflow/g3doc/api_docs/cc/StructSessionOptions.md
+++ b/tensorflow/g3doc/api_docs/cc/StructSessionOptions.md
@@ -1,4 +1,4 @@
-#Struct tensorflow::SessionOptions <a class="md-anchor" id="AUTOGENERATED-struct-tensorflow--sessionoptions"></a>
+# Struct `tensorflow::SessionOptions` <a class="md-anchor" id="AUTOGENERATED-struct--tensorflow--sessionoptions-"></a>
Configuration information for a Session .
@@ -6,43 +6,43 @@ Configuration information for a Session .
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [Env* tensorflow::SessionOptions::env](#Env_tensorflow_SessionOptions_env)
+* [`Env* tensorflow::SessionOptions::env`](#Env_tensorflow_SessionOptions_env)
* The environment to use.
-* [string tensorflow::SessionOptions::target](#string_tensorflow_SessionOptions_target)
+* [`string tensorflow::SessionOptions::target`](#string_tensorflow_SessionOptions_target)
* The TensorFlow runtime to connect to.
-* [ConfigProto tensorflow::SessionOptions::config](#ConfigProto_tensorflow_SessionOptions_config)
+* [`ConfigProto tensorflow::SessionOptions::config`](#ConfigProto_tensorflow_SessionOptions_config)
* Configuration options.
-* [tensorflow::SessionOptions::SessionOptions](#tensorflow_SessionOptions_SessionOptions)
+* [`tensorflow::SessionOptions::SessionOptions()`](#tensorflow_SessionOptions_SessionOptions)
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### Env* tensorflow::SessionOptions::env <a class="md-anchor" id="Env_tensorflow_SessionOptions_env"></a>
+#### `Env* tensorflow::SessionOptions::env` <a class="md-anchor" id="Env_tensorflow_SessionOptions_env"></a>
The environment to use.
-#### string tensorflow::SessionOptions::target <a class="md-anchor" id="string_tensorflow_SessionOptions_target"></a>
+#### `string tensorflow::SessionOptions::target` <a class="md-anchor" id="string_tensorflow_SessionOptions_target"></a>
The TensorFlow runtime to connect to.
If &apos;target&apos; is empty or unspecified, the local TensorFlow runtime implementation will be used. Otherwise, the TensorFlow engine defined by &apos;target&apos; will be used to perform all computations.
-&quot;target&quot; can be either a single entry or a comma separated list of entries. Each entry is a resolvable address of the following format: local ip:port host:port ... other system-specific formats to identify tasks and jobs ...
+"target" can be either a single entry or a comma separated list of entries. Each entry is a resolvable address of the following format: local ip:port host:port ... other system-specific formats to identify tasks and jobs ...
NOTE: at the moment &apos;local&apos; maps to an in-process service-based runtime.
-Upon creation, a single session affines itself to one of the remote processes, with possible load balancing choices when the &quot;target&quot; resolves to a list of possible processes.
+Upon creation, a single session affines itself to one of the remote processes, with possible load balancing choices when the "target" resolves to a list of possible processes.
If the session disconnects from the remote process during its lifetime, session calls may fail immediately.
-#### ConfigProto tensorflow::SessionOptions::config <a class="md-anchor" id="ConfigProto_tensorflow_SessionOptions_config"></a>
+#### `ConfigProto tensorflow::SessionOptions::config` <a class="md-anchor" id="ConfigProto_tensorflow_SessionOptions_config"></a>
Configuration options.
-#### tensorflow::SessionOptions::SessionOptions() <a class="md-anchor" id="tensorflow_SessionOptions_SessionOptions"></a>
+#### `tensorflow::SessionOptions::SessionOptions()` <a class="md-anchor" id="tensorflow_SessionOptions_SessionOptions"></a>
diff --git a/tensorflow/g3doc/api_docs/cc/StructState.md b/tensorflow/g3doc/api_docs/cc/StructState.md
index 5772ef46c9..8961925cfe 100644
--- a/tensorflow/g3doc/api_docs/cc/StructState.md
+++ b/tensorflow/g3doc/api_docs/cc/StructState.md
@@ -1,4 +1,4 @@
-#Struct tensorflow::Status::State <a class="md-anchor" id="AUTOGENERATED-struct-tensorflow--status--state"></a>
+# Struct `tensorflow::Status::State` <a class="md-anchor" id="AUTOGENERATED-struct--tensorflow--status--state-"></a>
@@ -6,18 +6,18 @@
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [tensorflow::error::Code tensorflow::Status::State::code](#tensorflow_error_Code_tensorflow_Status_State_code)
-* [string tensorflow::Status::State::msg](#string_tensorflow_Status_State_msg)
+* [`tensorflow::error::Code tensorflow::Status::State::code`](#tensorflow_error_Code_tensorflow_Status_State_code)
+* [`string tensorflow::Status::State::msg`](#string_tensorflow_Status_State_msg)
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### tensorflow::error::Code tensorflow::Status::State::code <a class="md-anchor" id="tensorflow_error_Code_tensorflow_Status_State_code"></a>
+#### `tensorflow::error::Code tensorflow::Status::State::code` <a class="md-anchor" id="tensorflow_error_Code_tensorflow_Status_State_code"></a>
-#### string tensorflow::Status::State::msg <a class="md-anchor" id="string_tensorflow_Status_State_msg"></a>
+#### `string tensorflow::Status::State::msg` <a class="md-anchor" id="string_tensorflow_Status_State_msg"></a>
diff --git a/tensorflow/g3doc/api_docs/cc/StructTensorShapeDim.md b/tensorflow/g3doc/api_docs/cc/StructTensorShapeDim.md
index f177d3c8a8..d37948f1dc 100644
--- a/tensorflow/g3doc/api_docs/cc/StructTensorShapeDim.md
+++ b/tensorflow/g3doc/api_docs/cc/StructTensorShapeDim.md
@@ -1,4 +1,4 @@
-#Struct tensorflow::TensorShapeDim <a class="md-anchor" id="AUTOGENERATED-struct-tensorflow--tensorshapedim"></a>
+# Struct `tensorflow::TensorShapeDim` <a class="md-anchor" id="AUTOGENERATED-struct--tensorflow--tensorshapedim-"></a>
@@ -6,18 +6,18 @@
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [int tensorflow::TensorShapeDim::size](#int_tensorflow_TensorShapeDim_size)
-* [tensorflow::TensorShapeDim::TensorShapeDim](#tensorflow_TensorShapeDim_TensorShapeDim)
+* [`int tensorflow::TensorShapeDim::size`](#int_tensorflow_TensorShapeDim_size)
+* [`tensorflow::TensorShapeDim::TensorShapeDim(int64 s)`](#tensorflow_TensorShapeDim_TensorShapeDim)
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### int tensorflow::TensorShapeDim::size <a class="md-anchor" id="int_tensorflow_TensorShapeDim_size"></a>
+#### `int tensorflow::TensorShapeDim::size` <a class="md-anchor" id="int_tensorflow_TensorShapeDim_size"></a>
-#### tensorflow::TensorShapeDim::TensorShapeDim(int64 s) <a class="md-anchor" id="tensorflow_TensorShapeDim_TensorShapeDim"></a>
+#### `tensorflow::TensorShapeDim::TensorShapeDim(int64 s)` <a class="md-anchor" id="tensorflow_TensorShapeDim_TensorShapeDim"></a>
diff --git a/tensorflow/g3doc/api_docs/cc/StructThreadOptions.md b/tensorflow/g3doc/api_docs/cc/StructThreadOptions.md
index 20dacebab2..6a7ee7fd6f 100644
--- a/tensorflow/g3doc/api_docs/cc/StructThreadOptions.md
+++ b/tensorflow/g3doc/api_docs/cc/StructThreadOptions.md
@@ -1,4 +1,4 @@
-#Struct tensorflow::ThreadOptions <a class="md-anchor" id="AUTOGENERATED-struct-tensorflow--threadoptions"></a>
+# Struct `tensorflow::ThreadOptions` <a class="md-anchor" id="AUTOGENERATED-struct--tensorflow--threadoptions-"></a>
Options to configure a Thread .
@@ -6,20 +6,20 @@ Note that the options are all hints, and the underlying implementation may choos
##Member Summary <a class="md-anchor" id="AUTOGENERATED-member-summary"></a>
-* [size_t tensorflow::ThreadOptions::stack_size](#size_t_tensorflow_ThreadOptions_stack_size)
+* [`size_t tensorflow::ThreadOptions::stack_size`](#size_t_tensorflow_ThreadOptions_stack_size)
* Thread stack size to use (in bytes).
-* [size_t tensorflow::ThreadOptions::guard_size](#size_t_tensorflow_ThreadOptions_guard_size)
+* [`size_t tensorflow::ThreadOptions::guard_size`](#size_t_tensorflow_ThreadOptions_guard_size)
* Guard area size to use near thread stacks (in bytes).
##Member Details <a class="md-anchor" id="AUTOGENERATED-member-details"></a>
-#### size_t tensorflow::ThreadOptions::stack_size <a class="md-anchor" id="size_t_tensorflow_ThreadOptions_stack_size"></a>
+#### `size_t tensorflow::ThreadOptions::stack_size` <a class="md-anchor" id="size_t_tensorflow_ThreadOptions_stack_size"></a>
Thread stack size to use (in bytes).
-#### size_t tensorflow::ThreadOptions::guard_size <a class="md-anchor" id="size_t_tensorflow_ThreadOptions_guard_size"></a>
+#### `size_t tensorflow::ThreadOptions::guard_size` <a class="md-anchor" id="size_t_tensorflow_ThreadOptions_guard_size"></a>
Guard area size to use near thread stacks (in bytes).
diff --git a/tensorflow/g3doc/api_docs/cc/index.md b/tensorflow/g3doc/api_docs/cc/index.md
index 9a3a75534b..00da48cde9 100644
--- a/tensorflow/g3doc/api_docs/cc/index.md
+++ b/tensorflow/g3doc/api_docs/cc/index.md
@@ -4,7 +4,7 @@ TensorFlow's public C++ API includes only the API for executing graphs, as of
version 0.5. To control the execution of a graph from C++:
1. Build the computation graph using the [Python API](../python/).
-1. Use [tf.train.write_graph()](../python/train.md?cl=head#write_graph) to
+1. Use [tf.train.write_graph()](../python/train.md#write_graph) to
write the graph to a file.
1. Load the graph using the C++ Session API. For example:
@@ -67,9 +67,3 @@ write the graph to a file.
<!-- StructThreadOptions.md -->
-->
</div>
-
-
-
-
-
-
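As a rough companion to the three-step workflow documented in the hunk above, a minimal Python-side sketch of the first two steps (building a graph and serializing it for the C++ Session API) might look like the following; the node names and the `/tmp/tf_models` output path are illustrative only, and it assumes the 0.x-era `tf.train.write_graph()` linked above.

```python
import tensorflow as tf

# Step 1: build a small computation graph with the Python API.
a = tf.constant(3.0, name="a")
b = tf.constant(4.0, name="b")
c = tf.add(a, b, name="c")   # the node a C++ Session would later fetch as "c:0"

# Step 2: serialize the GraphDef so it can be loaded from C++.
graph_def = tf.get_default_graph().as_graph_def()
tf.train.write_graph(graph_def, "/tmp/tf_models", "graph.pbtxt", as_text=True)
```

Step 3, loading and running the serialized graph, is the C++ example the index.md hunk refers to with "For example:".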
diff --git a/tensorflow/g3doc/api_docs/python/array_ops.md b/tensorflow/g3doc/api_docs/python/array_ops.md
index 9d68da7caa..44b32ecc8e 100644
--- a/tensorflow/g3doc/api_docs/python/array_ops.md
+++ b/tensorflow/g3doc/api_docs/python/array_ops.md
@@ -9,34 +9,34 @@ accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
## Contents
### [Tensor Transformations](#AUTOGENERATED-tensor-transformations)
* [Casting](#AUTOGENERATED-casting)
- * [tf.string_to_number(string_tensor, out_type=None, name=None)](#string_to_number)
- * [tf.to_double(x, name='ToDouble')](#to_double)
- * [tf.to_float(x, name='ToFloat')](#to_float)
- * [tf.to_bfloat16(x, name='ToBFloat16')](#to_bfloat16)
- * [tf.to_int32(x, name='ToInt32')](#to_int32)
- * [tf.to_int64(x, name='ToInt64')](#to_int64)
- * [tf.cast(x, dtype, name=None)](#cast)
+ * [`tf.string_to_number(string_tensor, out_type=None, name=None)`](#string_to_number)
+ * [`tf.to_double(x, name='ToDouble')`](#to_double)
+ * [`tf.to_float(x, name='ToFloat')`](#to_float)
+ * [`tf.to_bfloat16(x, name='ToBFloat16')`](#to_bfloat16)
+ * [`tf.to_int32(x, name='ToInt32')`](#to_int32)
+ * [`tf.to_int64(x, name='ToInt64')`](#to_int64)
+ * [`tf.cast(x, dtype, name=None)`](#cast)
* [Shapes and Shaping](#AUTOGENERATED-shapes-and-shaping)
- * [tf.shape(input, name=None)](#shape)
- * [tf.size(input, name=None)](#size)
- * [tf.rank(input, name=None)](#rank)
- * [tf.reshape(tensor, shape, name=None)](#reshape)
- * [tf.squeeze(input, squeeze_dims=None, name=None)](#squeeze)
- * [tf.expand_dims(input, dim, name=None)](#expand_dims)
+ * [`tf.shape(input, name=None)`](#shape)
+ * [`tf.size(input, name=None)`](#size)
+ * [`tf.rank(input, name=None)`](#rank)
+ * [`tf.reshape(tensor, shape, name=None)`](#reshape)
+ * [`tf.squeeze(input, squeeze_dims=None, name=None)`](#squeeze)
+ * [`tf.expand_dims(input, dim, name=None)`](#expand_dims)
* [Slicing and Joining](#AUTOGENERATED-slicing-and-joining)
- * [tf.slice(input_, begin, size, name=None)](#slice)
- * [tf.split(split_dim, num_split, value, name='split')](#split)
- * [tf.tile(input, multiples, name=None)](#tile)
- * [tf.pad(input, paddings, name=None)](#pad)
- * [tf.concat(concat_dim, values, name='concat')](#concat)
- * [tf.pack(values, name='pack')](#pack)
- * [tf.unpack(value, num=None, name='unpack')](#unpack)
- * [tf.reverse_sequence(input, seq_lengths, seq_dim, name=None)](#reverse_sequence)
- * [tf.reverse(tensor, dims, name=None)](#reverse)
- * [tf.transpose(a, perm=None, name='transpose')](#transpose)
- * [tf.gather(params, indices, name=None)](#gather)
- * [tf.dynamic_partition(data, partitions, num_partitions, name=None)](#dynamic_partition)
- * [tf.dynamic_stitch(indices, data, name=None)](#dynamic_stitch)
+ * [`tf.slice(input_, begin, size, name=None)`](#slice)
+ * [`tf.split(split_dim, num_split, value, name='split')`](#split)
+ * [`tf.tile(input, multiples, name=None)`](#tile)
+ * [`tf.pad(input, paddings, name=None)`](#pad)
+ * [`tf.concat(concat_dim, values, name='concat')`](#concat)
+ * [`tf.pack(values, name='pack')`](#pack)
+ * [`tf.unpack(value, num=None, name='unpack')`](#unpack)
+ * [`tf.reverse_sequence(input, seq_lengths, seq_dim, name=None)`](#reverse_sequence)
+ * [`tf.reverse(tensor, dims, name=None)`](#reverse)
+ * [`tf.transpose(a, perm=None, name='transpose')`](#transpose)
+ * [`tf.gather(params, indices, name=None)`](#gather)
+ * [`tf.dynamic_partition(data, partitions, num_partitions, name=None)`](#dynamic_partition)
+ * [`tf.dynamic_stitch(indices, data, name=None)`](#dynamic_stitch)
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
@@ -48,7 +48,7 @@ types in your graph.
- - -
-### tf.string_to_number(string_tensor, out_type=None, name=None) <a class="md-anchor" id="string_to_number"></a>
+### `tf.string_to_number(string_tensor, out_type=None, name=None)` <a class="md-anchor" id="string_to_number"></a>
Converts each string in the input Tensor to the specified numeric type.
@@ -71,7 +71,7 @@ results in a rounded value.)
- - -
-### tf.to_double(x, name='ToDouble') <a class="md-anchor" id="to_double"></a>
+### `tf.to_double(x, name='ToDouble')` <a class="md-anchor" id="to_double"></a>
Casts a tensor to type `float64`.
@@ -93,7 +93,7 @@ Casts a tensor to type `float64`.
- - -
-### tf.to_float(x, name='ToFloat') <a class="md-anchor" id="to_float"></a>
+### `tf.to_float(x, name='ToFloat')` <a class="md-anchor" id="to_float"></a>
Casts a tensor to type `float32`.
@@ -115,7 +115,7 @@ Casts a tensor to type `float32`.
- - -
-### tf.to_bfloat16(x, name='ToBFloat16') <a class="md-anchor" id="to_bfloat16"></a>
+### `tf.to_bfloat16(x, name='ToBFloat16')` <a class="md-anchor" id="to_bfloat16"></a>
Casts a tensor to type `bfloat16`.
@@ -137,7 +137,7 @@ Casts a tensor to type `bfloat16`.
- - -
-### tf.to_int32(x, name='ToInt32') <a class="md-anchor" id="to_int32"></a>
+### `tf.to_int32(x, name='ToInt32')` <a class="md-anchor" id="to_int32"></a>
Casts a tensor to type `int32`.
@@ -159,7 +159,7 @@ Casts a tensor to type `int32`.
- - -
-### tf.to_int64(x, name='ToInt64') <a class="md-anchor" id="to_int64"></a>
+### `tf.to_int64(x, name='ToInt64')` <a class="md-anchor" id="to_int64"></a>
Casts a tensor to type `int64`.
@@ -181,7 +181,7 @@ Casts a tensor to type `int64`.
- - -
-### tf.cast(x, dtype, name=None) <a class="md-anchor" id="cast"></a>
+### `tf.cast(x, dtype, name=None)` <a class="md-anchor" id="cast"></a>
Casts a tensor to a new type.
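To make the casting entries concrete, here is a small sketch that assumes the 0.x-era API listed in the contents above and an in-process `tf.Session`; the tensor values are illustrative.

```python
import tensorflow as tf

a = tf.constant([1.8, 2.2], dtype=tf.float32)
b = tf.cast(a, tf.int32)   # truncates toward zero: [1, 2]
c = tf.to_double(a)        # shorthand for casting to float64

with tf.Session() as sess:
    print(sess.run(b))     # [1 2]
    print(sess.run(c))     # [1.8 2.2]
```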
@@ -220,7 +220,7 @@ of a tensor and change the shape of a tensor.
- - -
-### tf.shape(input, name=None) <a class="md-anchor" id="shape"></a>
+### `tf.shape(input, name=None)` <a class="md-anchor" id="shape"></a>
Returns the shape of a tensor.
@@ -246,7 +246,7 @@ shape(t) ==> [2, 2, 3]
- - -
-### tf.size(input, name=None) <a class="md-anchor" id="size"></a>
+### `tf.size(input, name=None)` <a class="md-anchor" id="size"></a>
Returns the size of a tensor.
@@ -273,7 +273,7 @@ size(t) ==> 12
- - -
-### tf.rank(input, name=None) <a class="md-anchor" id="rank"></a>
+### `tf.rank(input, name=None)` <a class="md-anchor" id="rank"></a>
Returns the rank of a tensor.
@@ -304,7 +304,7 @@ of the tensor. Rank is also known as "order", "degree", or "ndims."
- - -
-### tf.reshape(tensor, shape, name=None) <a class="md-anchor" id="reshape"></a>
+### `tf.reshape(tensor, shape, name=None)` <a class="md-anchor" id="reshape"></a>
Reshapes a tensor.
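A brief sketch of `tf.reshape`, reusing the flattening behaviour shown in this file's own examples (assumed 0.x-era API, illustrative values):

```python
import tensorflow as tf

t = tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9])
m = tf.reshape(t, [3, 3])    # -> [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
flat = tf.reshape(m, [-1])   # -1 asks reshape to infer that dimension: back to 9 elements

with tf.Session() as sess:
    print(sess.run(m))
    print(sess.run(flat))
```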
@@ -358,7 +358,7 @@ reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
- - -
-### tf.squeeze(input, squeeze_dims=None, name=None) <a class="md-anchor" id="squeeze"></a>
+### `tf.squeeze(input, squeeze_dims=None, name=None)` <a class="md-anchor" id="squeeze"></a>
Removes dimensions of size 1 from the shape of a tensor.
@@ -399,7 +399,7 @@ shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
- - -
-### tf.expand_dims(input, dim, name=None) <a class="md-anchor" id="expand_dims"></a>
+### `tf.expand_dims(input, dim, name=None)` <a class="md-anchor" id="expand_dims"></a>
Inserts a dimension of 1 into a tensor's shape.
@@ -458,7 +458,7 @@ or join multiple tensors together.
- - -
-### tf.slice(input_, begin, size, name=None) <a class="md-anchor" id="slice"></a>
+### `tf.slice(input_, begin, size, name=None)` <a class="md-anchor" id="slice"></a>
Extracts a slice from a tensor.
@@ -508,7 +508,7 @@ tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],
- - -
-### tf.split(split_dim, num_split, value, name='split') <a class="md-anchor" id="split"></a>
+### `tf.split(split_dim, num_split, value, name='split')` <a class="md-anchor" id="split"></a>
Splits a tensor into `num_split` tensors along one dimension.
@@ -540,7 +540,7 @@ tf.shape(split0) ==> [5, 10]
- - -
-### tf.tile(input, multiples, name=None) <a class="md-anchor" id="tile"></a>
+### `tf.tile(input, multiples, name=None)` <a class="md-anchor" id="tile"></a>
Constructs a tensor by tiling a given tensor.
@@ -565,7 +565,7 @@ dimension. For example, tiling `[a b c d]` by `[2]` produces
- - -
-### tf.pad(input, paddings, name=None) <a class="md-anchor" id="pad"></a>
+### `tf.pad(input, paddings, name=None)` <a class="md-anchor" id="pad"></a>
Pads a tensor with zeros.
@@ -607,7 +607,7 @@ pad(t, paddings) ==> [[0, 0, 0, 0, 0]
- - -
-### tf.concat(concat_dim, values, name='concat') <a class="md-anchor" id="concat"></a>
+### `tf.concat(concat_dim, values, name='concat')` <a class="md-anchor" id="concat"></a>
Concatenates tensors along one dimension.
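Note that in this generation of the API the dimension argument comes first. A short sketch (illustrative values, assumed 0.x-era `tf.concat(concat_dim, values)`):

```python
import tensorflow as tf

t1 = tf.constant([[1, 2, 3], [4, 5, 6]])
t2 = tf.constant([[7, 8, 9], [10, 11, 12]])

c0 = tf.concat(0, [t1, t2])  # stack along rows    -> shape [4, 3]
c1 = tf.concat(1, [t1, t2])  # stack along columns -> shape [2, 6]

with tf.Session() as sess:
    print(sess.run(tf.shape(c0)))   # [4 3]
    print(sess.run(tf.shape(c1)))   # [2 6]
```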
@@ -655,7 +655,7 @@ tf.shape(tf.concat(1, [t3, t4])) ==> [2, 6]
- - -
-### tf.pack(values, name='pack') <a class="md-anchor" id="pack"></a>
+### `tf.pack(values, name='pack')` <a class="md-anchor" id="pack"></a>
Packs a list of rank-`R` tensors into one rank-`(R+1)` tensor.
@@ -681,7 +681,7 @@ This is the opposite of unpack. The numpy equivalent is
- - -
-### tf.unpack(value, num=None, name='unpack') <a class="md-anchor" id="unpack"></a>
+### `tf.unpack(value, num=None, name='unpack')` <a class="md-anchor" id="unpack"></a>
Unpacks the outer dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
@@ -716,7 +716,7 @@ This is the opposite of pack. The numpy equivalent is
- - -
-### tf.reverse_sequence(input, seq_lengths, seq_dim, name=None) <a class="md-anchor" id="reverse_sequence"></a>
+### `tf.reverse_sequence(input, seq_lengths, seq_dim, name=None)` <a class="md-anchor" id="reverse_sequence"></a>
Reverses variable length slices in dimension `seq_dim`.
@@ -768,7 +768,7 @@ output[3, 2:, :, ...] = input[3, 2:, :, ...]
- - -
-### tf.reverse(tensor, dims, name=None) <a class="md-anchor" id="reverse"></a>
+### `tf.reverse(tensor, dims, name=None)` <a class="md-anchor" id="reverse"></a>
Reverses specific dimensions of a tensor.
@@ -832,7 +832,7 @@ reverse(t, dims) ==> [[[[8, 9, 10, 11],
- - -
-### tf.transpose(a, perm=None, name='transpose') <a class="md-anchor" id="transpose"></a>
+### `tf.transpose(a, perm=None, name='transpose')` <a class="md-anchor" id="transpose"></a>
Transposes `a`. Permutes the dimensions according to `perm`.
@@ -884,7 +884,7 @@ tf.transpose(b, perm=[0, 2, 1]) ==> [[[1 4]
- - -
-### tf.gather(params, indices, name=None) <a class="md-anchor" id="gather"></a>
+### `tf.gather(params, indices, name=None)` <a class="md-anchor" id="gather"></a>
Gather slices from `params` according to `indices`.
@@ -921,7 +921,7 @@ this operation will permute `params` accordingly.
- - -
-### tf.dynamic_partition(data, partitions, num_partitions, name=None) <a class="md-anchor" id="dynamic_partition"></a>
+### `tf.dynamic_partition(data, partitions, num_partitions, name=None)` <a class="md-anchor" id="dynamic_partition"></a>
Partitions `data` into `num_partitions` tensors using indices from `partitions`.
@@ -974,7 +974,7 @@ For example:
- - -
-### tf.dynamic_stitch(indices, data, name=None) <a class="md-anchor" id="dynamic_stitch"></a>
+### `tf.dynamic_stitch(indices, data, name=None)` <a class="md-anchor" id="dynamic_stitch"></a>
Interleave the values from the `data` tensors into a single tensor.
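A combined sketch of the last two slicing/joining ops in this file, `tf.gather` and `tf.dynamic_stitch` (assumed 0.x-era API; the index and data values are made up):

```python
import tensorflow as tf

# tf.gather picks slices of `params` at the given indices.
params = tf.constant([10, 20, 30, 40])
g = tf.gather(params, tf.constant([3, 0, 2]))        # -> [40, 10, 30]

# tf.dynamic_stitch places data[m][i] at position indices[m][i] of the result.
indices = [tf.constant([0, 2]), tf.constant([1, 3])]
data = [tf.constant([100, 300]), tf.constant([200, 400])]
s = tf.dynamic_stitch(indices, data)                 # -> [100, 200, 300, 400]

with tf.Session() as sess:
    print(sess.run(g))
    print(sess.run(s))
```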
diff --git a/tensorflow/g3doc/api_docs/python/client.md b/tensorflow/g3doc/api_docs/python/client.md
index 3da41016a1..738225a656 100644
--- a/tensorflow/g3doc/api_docs/python/client.md
+++ b/tensorflow/g3doc/api_docs/python/client.md
@@ -7,7 +7,7 @@
* [Session management](#AUTOGENERATED-session-management)
* [class tf.Session](#Session)
* [class tf.InteractiveSession](#InteractiveSession)
- * [tf.get_default_session()](#get_default_session)
+ * [`tf.get_default_session()`](#get_default_session)
* [Error classes](#AUTOGENERATED-error-classes)
* [class tf.OpError](#OpError)
* [class tf.errors.CancelledError](#CancelledError)
@@ -95,7 +95,7 @@ sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
- - -
-#### tf.Session.__init__(target='', graph=None, config=None) <a class="md-anchor" id="Session.__init__"></a>
+#### `tf.Session.__init__(target='', graph=None, config=None)` <a class="md-anchor" id="Session.__init__"></a>
Creates a new TensorFlow session.
@@ -120,7 +120,7 @@ the session constructor.
- - -
-#### tf.Session.run(fetches, feed_dict=None) <a class="md-anchor" id="Session.run"></a>
+#### `tf.Session.run(fetches, feed_dict=None)` <a class="md-anchor" id="Session.run"></a>
Runs the operations and evaluates the tensors in `fetches`.
@@ -184,7 +184,7 @@ one of the following types:
- - -
-#### tf.Session.close() <a class="md-anchor" id="Session.close"></a>
+#### `tf.Session.close()` <a class="md-anchor" id="Session.close"></a>
Closes this session.
@@ -199,14 +199,14 @@ Calling this method frees all resources associated with the session.
- - -
-#### tf.Session.graph <a class="md-anchor" id="Session.graph"></a>
+#### `tf.Session.graph` <a class="md-anchor" id="Session.graph"></a>
The graph that was launched in this session.
- - -
-#### tf.Session.as_default() <a class="md-anchor" id="Session.as_default"></a>
+#### `tf.Session.as_default()` <a class="md-anchor" id="Session.as_default"></a>
Returns a context manager that makes this object the default session.
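A minimal sketch tying together `Session.run()` with a `feed_dict` and `as_default()` as documented above; it assumes the 0.x-era API and uses a hypothetical placeholder `x`:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[2])
y = x * 2.0

sess = tf.Session()
print(sess.run(y, feed_dict={x: [1.0, 3.0]}))   # [2. 6.]

# as_default() installs this session as the default one, so Tensor.eval() works.
with sess.as_default():
    print(y.eval(feed_dict={x: [5.0, 7.0]}))    # [10. 14.]
sess.close()
```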
@@ -302,7 +302,7 @@ with tf.Session():
- - -
-#### tf.InteractiveSession.__init__(target='', graph=None) <a class="md-anchor" id="InteractiveSession.__init__"></a>
+#### `tf.InteractiveSession.__init__(target='', graph=None)` <a class="md-anchor" id="InteractiveSession.__init__"></a>
Creates a new interactive TensorFlow session.
@@ -325,7 +325,7 @@ the session constructor.
- - -
-#### tf.InteractiveSession.close() <a class="md-anchor" id="InteractiveSession.close"></a>
+#### `tf.InteractiveSession.close()` <a class="md-anchor" id="InteractiveSession.close"></a>
Closes an `InteractiveSession`.
@@ -334,7 +334,7 @@ Closes an `InteractiveSession`.
- - -
-### tf.get_default_session() <a class="md-anchor" id="get_default_session"></a>
+### `tf.get_default_session()` <a class="md-anchor" id="get_default_session"></a>
Returns the default session for the current thread.
@@ -365,7 +365,7 @@ of `OpError` from the `tf.errors` module.
- - -
-#### tf.OpError.op <a class="md-anchor" id="OpError.op"></a>
+#### `tf.OpError.op` <a class="md-anchor" id="OpError.op"></a>
The operation that failed, if known.
@@ -382,7 +382,7 @@ op.
- - -
-#### tf.OpError.node_def <a class="md-anchor" id="OpError.node_def"></a>
+#### `tf.OpError.node_def` <a class="md-anchor" id="OpError.node_def"></a>
The `NodeDef` proto representing the op that failed.
@@ -390,7 +390,7 @@ The `NodeDef` proto representing the op that failed.
#### Other Methods <a class="md-anchor" id="AUTOGENERATED-other-methods"></a>
- - -
-#### tf.OpError.__init__(node_def, op, message, error_code) <a class="md-anchor" id="OpError.__init__"></a>
+#### `tf.OpError.__init__(node_def, op, message, error_code)` <a class="md-anchor" id="OpError.__init__"></a>
Creates a new OpError indicating that a particular op failed.
@@ -405,13 +405,13 @@ Creates a new OpError indicating that a particular op failed.
- - -
-#### tf.OpError.error_code <a class="md-anchor" id="OpError.error_code"></a>
+#### `tf.OpError.error_code` <a class="md-anchor" id="OpError.error_code"></a>
The integer error code that describes the error.
- - -
-#### tf.OpError.message <a class="md-anchor" id="OpError.message"></a>
+#### `tf.OpError.message` <a class="md-anchor" id="OpError.message"></a>
The error message that describes the error.
@@ -431,7 +431,7 @@ running such a long-running operation will fail by raising `CancelledError`.
- - -
-#### tf.errors.CancelledError.__init__(node_def, op, message) <a class="md-anchor" id="CancelledError.__init__"></a>
+#### `tf.errors.CancelledError.__init__(node_def, op, message)` <a class="md-anchor" id="CancelledError.__init__"></a>
Creates a `CancelledError`.
@@ -451,7 +451,7 @@ error.
- - -
-#### tf.errors.UnknownError.__init__(node_def, op, message, error_code=2) <a class="md-anchor" id="UnknownError.__init__"></a>
+#### `tf.errors.UnknownError.__init__(node_def, op, message, error_code=2)` <a class="md-anchor" id="UnknownError.__init__"></a>
Creates an `UnknownError`.
@@ -473,7 +473,7 @@ tensor.
- - -
-#### tf.errors.InvalidArgumentError.__init__(node_def, op, message) <a class="md-anchor" id="InvalidArgumentError.__init__"></a>
+#### `tf.errors.InvalidArgumentError.__init__(node_def, op, message)` <a class="md-anchor" id="InvalidArgumentError.__init__"></a>
Creates an `InvalidArgumentError`.
@@ -489,7 +489,7 @@ This exception is not currently used.
- - -
-#### tf.errors.DeadlineExceededError.__init__(node_def, op, message) <a class="md-anchor" id="DeadlineExceededError.__init__"></a>
+#### `tf.errors.DeadlineExceededError.__init__(node_def, op, message)` <a class="md-anchor" id="DeadlineExceededError.__init__"></a>
Creates a `DeadlineExceededError`.
@@ -508,7 +508,7 @@ does not exist.
- - -
-#### tf.errors.NotFoundError.__init__(node_def, op, message) <a class="md-anchor" id="NotFoundError.__init__"></a>
+#### `tf.errors.NotFoundError.__init__(node_def, op, message)` <a class="md-anchor" id="NotFoundError.__init__"></a>
Creates a `NotFoundError`.
@@ -527,7 +527,7 @@ existing file was passed.
- - -
-#### tf.errors.AlreadyExistsError.__init__(node_def, op, message) <a class="md-anchor" id="AlreadyExistsError.__init__"></a>
+#### `tf.errors.AlreadyExistsError.__init__(node_def, op, message)` <a class="md-anchor" id="AlreadyExistsError.__init__"></a>
Creates an `AlreadyExistsError`.
@@ -546,7 +546,7 @@ file for which the user does not have the read file permission.
- - -
-#### tf.errors.PermissionDeniedError.__init__(node_def, op, message) <a class="md-anchor" id="PermissionDeniedError.__init__"></a>
+#### `tf.errors.PermissionDeniedError.__init__(node_def, op, message)` <a class="md-anchor" id="PermissionDeniedError.__init__"></a>
Creates a `PermissionDeniedError`.
@@ -562,7 +562,7 @@ This exception is not currently used.
- - -
-#### tf.errors.UnauthenticatedError.__init__(node_def, op, message) <a class="md-anchor" id="UnauthenticatedError.__init__"></a>
+#### `tf.errors.UnauthenticatedError.__init__(node_def, op, message)` <a class="md-anchor" id="UnauthenticatedError.__init__"></a>
Creates an `UnauthenticatedError`.
@@ -579,7 +579,7 @@ exhausted, or perhaps the entire file system is out of space.
- - -
-#### tf.errors.ResourceExhaustedError.__init__(node_def, op, message) <a class="md-anchor" id="ResourceExhaustedError.__init__"></a>
+#### `tf.errors.ResourceExhaustedError.__init__(node_def, op, message)` <a class="md-anchor" id="ResourceExhaustedError.__init__"></a>
Creates a `ResourceExhaustedError`.
@@ -597,7 +597,7 @@ been initialized.
- - -
-#### tf.errors.FailedPreconditionError.__init__(node_def, op, message) <a class="md-anchor" id="FailedPreconditionError.__init__"></a>
+#### `tf.errors.FailedPreconditionError.__init__(node_def, op, message)` <a class="md-anchor" id="FailedPreconditionError.__init__"></a>
Creates a `FailedPreconditionError`.
@@ -615,7 +615,7 @@ operation may raise `AbortedError` if a
- - -
-#### tf.errors.AbortedError.__init__(node_def, op, message) <a class="md-anchor" id="AbortedError.__init__"></a>
+#### `tf.errors.AbortedError.__init__(node_def, op, message)` <a class="md-anchor" id="AbortedError.__init__"></a>
Creates an `AbortedError`.
@@ -634,7 +634,7 @@ blocked on an empty queue, and a
- - -
-#### tf.errors.OutOfRangeError.__init__(node_def, op, message) <a class="md-anchor" id="OutOfRangeError.__init__"></a>
+#### `tf.errors.OutOfRangeError.__init__(node_def, op, message)` <a class="md-anchor" id="OutOfRangeError.__init__"></a>
Creates an `OutOfRangeError`.
@@ -654,7 +654,7 @@ is not yet supported.
- - -
-#### tf.errors.UnimplementedError.__init__(node_def, op, message) <a class="md-anchor" id="UnimplementedError.__init__"></a>
+#### `tf.errors.UnimplementedError.__init__(node_def, op, message)` <a class="md-anchor" id="UnimplementedError.__init__"></a>
Creates an `UnimplementedError`.
@@ -671,7 +671,7 @@ has been broken. Catching this exception is not recommended.
- - -
-#### tf.errors.InternalError.__init__(node_def, op, message) <a class="md-anchor" id="InternalError.__init__"></a>
+#### `tf.errors.InternalError.__init__(node_def, op, message)` <a class="md-anchor" id="InternalError.__init__"></a>
Creates an `InternalError`.
@@ -687,7 +687,7 @@ This exception is not currently used.
- - -
-#### tf.errors.UnavailableError.__init__(node_def, op, message) <a class="md-anchor" id="UnavailableError.__init__"></a>
+#### `tf.errors.UnavailableError.__init__(node_def, op, message)` <a class="md-anchor" id="UnavailableError.__init__"></a>
Creates an `UnavailableError`.
@@ -705,7 +705,7 @@ if the file is truncated while it is being read.
- - -
-#### tf.errors.DataLossError.__init__(node_def, op, message) <a class="md-anchor" id="DataLossError.__init__"></a>
+#### `tf.errors.DataLossError.__init__(node_def, op, message)` <a class="md-anchor" id="DataLossError.__init__"></a>
Creates a `DataLossError`.
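The error classes above are raised by `Session.run()` and friends; a hedged sketch of catching one of them (the shapes are deliberately incompatible so the runtime raises `InvalidArgumentError`):

```python
import tensorflow as tf

x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
z = tf.matmul(x, y)

with tf.Session() as sess:
    try:
        # 2x3 times 2x3 is not a valid matrix product.
        sess.run(z, feed_dict={x: [[1., 2., 3.], [4., 5., 6.]],
                               y: [[1., 2., 3.], [4., 5., 6.]]})
    except tf.errors.InvalidArgumentError as e:
        print(e.message)   # the OpError.message property documented above
```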
diff --git a/tensorflow/g3doc/api_docs/python/constant_op.md b/tensorflow/g3doc/api_docs/python/constant_op.md
index b1a53b1a0c..59053a576e 100644
--- a/tensorflow/g3doc/api_docs/python/constant_op.md
+++ b/tensorflow/g3doc/api_docs/python/constant_op.md
@@ -9,22 +9,22 @@ accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
## Contents
### [Constants, Sequences, and Random Values](#AUTOGENERATED-constants--sequences--and-random-values)
* [Constant Value Tensors](#AUTOGENERATED-constant-value-tensors)
- * [tf.zeros(shape, dtype=tf.float32, name=None)](#zeros)
- * [tf.zeros_like(tensor, dtype=None, name=None)](#zeros_like)
- * [tf.ones(shape, dtype=tf.float32, name=None)](#ones)
- * [tf.ones_like(tensor, dtype=None, name=None)](#ones_like)
- * [tf.fill(dims, value, name=None)](#fill)
- * [tf.constant(value, dtype=None, shape=None, name='Const')](#constant)
+ * [`tf.zeros(shape, dtype=tf.float32, name=None)`](#zeros)
+ * [`tf.zeros_like(tensor, dtype=None, name=None)`](#zeros_like)
+ * [`tf.ones(shape, dtype=tf.float32, name=None)`](#ones)
+ * [`tf.ones_like(tensor, dtype=None, name=None)`](#ones_like)
+ * [`tf.fill(dims, value, name=None)`](#fill)
+ * [`tf.constant(value, dtype=None, shape=None, name='Const')`](#constant)
* [Sequences](#AUTOGENERATED-sequences)
- * [tf.linspace(start, stop, num, name=None)](#linspace)
- * [tf.range(start, limit, delta=1, name='range')](#range)
+ * [`tf.linspace(start, stop, num, name=None)`](#linspace)
+ * [`tf.range(start, limit, delta=1, name='range')`](#range)
* [Random Tensors](#AUTOGENERATED-random-tensors)
* [Examples:](#AUTOGENERATED-examples-)
- * [tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)](#random_normal)
- * [tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)](#truncated_normal)
- * [tf.random_uniform(shape, minval=0.0, maxval=1.0, dtype=tf.float32, seed=None, name=None)](#random_uniform)
- * [tf.random_shuffle(value, seed=None, name=None)](#random_shuffle)
- * [tf.set_random_seed(seed)](#set_random_seed)
+ * [`tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)`](#random_normal)
+ * [`tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)`](#truncated_normal)
+ * [`tf.random_uniform(shape, minval=0.0, maxval=1.0, dtype=tf.float32, seed=None, name=None)`](#random_uniform)
+ * [`tf.random_shuffle(value, seed=None, name=None)`](#random_shuffle)
+ * [`tf.set_random_seed(seed)`](#set_random_seed)
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
@@ -35,7 +35,7 @@ TensorFlow provides several operations that you can use to generate constants.
- - -
-### tf.zeros(shape, dtype=tf.float32, name=None) <a class="md-anchor" id="zeros"></a>
+### `tf.zeros(shape, dtype=tf.float32, name=None)` <a class="md-anchor" id="zeros"></a>
Creates a tensor with all elements set to zero.
@@ -62,7 +62,7 @@ tf.zeros([3, 4], int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
- - -
-### tf.zeros_like(tensor, dtype=None, name=None) <a class="md-anchor" id="zeros_like"></a>
+### `tf.zeros_like(tensor, dtype=None, name=None)` <a class="md-anchor" id="zeros_like"></a>
Creates a tensor with all elements set to zero.
@@ -94,7 +94,7 @@ tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]
- - -
-### tf.ones(shape, dtype=tf.float32, name=None) <a class="md-anchor" id="ones"></a>
+### `tf.ones(shape, dtype=tf.float32, name=None)` <a class="md-anchor" id="ones"></a>
Creates a tensor with all elements set to 1.
@@ -121,7 +121,7 @@ tf.ones([2, 3], int32) ==> [[1, 1, 1], [1, 1, 1]]
- - -
-### tf.ones_like(tensor, dtype=None, name=None) <a class="md-anchor" id="ones_like"></a>
+### `tf.ones_like(tensor, dtype=None, name=None)` <a class="md-anchor" id="ones_like"></a>
Creates a tensor with all elements set to 1.
@@ -153,7 +153,7 @@ tf.ones_like(tensor) ==> [[1, 1, 1], [1, 1, 1]]
- - -
-### tf.fill(dims, value, name=None) <a class="md-anchor" id="fill"></a>
+### `tf.fill(dims, value, name=None)` <a class="md-anchor" id="fill"></a>
Creates a tensor filled with a scalar value.
@@ -184,7 +184,7 @@ fill(dims, 9) ==> [[9, 9, 9]
- - -
-### tf.constant(value, dtype=None, shape=None, name='Const') <a class="md-anchor" id="constant"></a>
+### `tf.constant(value, dtype=None, shape=None, name='Const')` <a class="md-anchor" id="constant"></a>
Creates a constant tensor.
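A short sketch covering the constant-value tensors listed in this file (assumed 0.x-era API, illustrative shapes and values):

```python
import tensorflow as tf

z = tf.zeros([2, 3], dtype=tf.int32)               # [[0 0 0] [0 0 0]]
o = tf.ones_like(z)                                 # [[1 1 1] [1 1 1]]
f = tf.fill([2, 3], 9)                              # [[9 9 9] [9 9 9]]
c = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])   # values reshaped to 2x3

with tf.Session() as sess:
    print(sess.run([z, o, f, c]))
```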
@@ -241,7 +241,7 @@ Creates a constant tensor.
- - -
-### tf.linspace(start, stop, num, name=None) <a class="md-anchor" id="linspace"></a>
+### `tf.linspace(start, stop, num, name=None)` <a class="md-anchor" id="linspace"></a>
Generates values in an interval.
@@ -273,7 +273,7 @@ tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0]
- - -
-### tf.range(start, limit, delta=1, name='range') <a class="md-anchor" id="range"></a>
+### `tf.range(start, limit, delta=1, name='range')` <a class="md-anchor" id="range"></a>
Creates a sequence of integers.
@@ -359,7 +359,7 @@ print sess.run(var)
- - -
-### tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None) <a class="md-anchor" id="random_normal"></a>
+### `tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)` <a class="md-anchor" id="random_normal"></a>
Outputs random values from a normal distribution.
@@ -383,7 +383,7 @@ Outputs random values from a normal distribution.
- - -
-### tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None) <a class="md-anchor" id="truncated_normal"></a>
+### `tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)` <a class="md-anchor" id="truncated_normal"></a>
Outputs random values from a truncated normal distribution.
@@ -411,7 +411,7 @@ deviations from the mean are dropped and re-picked.
- - -
-### tf.random_uniform(shape, minval=0.0, maxval=1.0, dtype=tf.float32, seed=None, name=None) <a class="md-anchor" id="random_uniform"></a>
+### `tf.random_uniform(shape, minval=0.0, maxval=1.0, dtype=tf.float32, seed=None, name=None)` <a class="md-anchor" id="random_uniform"></a>
Outputs random values from a uniform distribution.
@@ -439,7 +439,7 @@ the upper bound `maxval` is excluded.
- - -
-### tf.random_shuffle(value, seed=None, name=None) <a class="md-anchor" id="random_shuffle"></a>
+### `tf.random_shuffle(value, seed=None, name=None)` <a class="md-anchor" id="random_shuffle"></a>
Randomly shuffles a tensor along its first dimension.
@@ -469,7 +469,7 @@ to one and only one `output[i]`. For example, a mapping that might occur for a
- - -
-### tf.set_random_seed(seed) <a class="md-anchor" id="set_random_seed"></a>
+### `tf.set_random_seed(seed)` <a class="md-anchor" id="set_random_seed"></a>
Sets the graph-level random seed.
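A sketch of the random-tensor ops together with the graph-level seed documented above (assumed 0.x-era API; shapes and seed value are arbitrary):

```python
import tensorflow as tf

# Fixing the graph-level seed makes the random ops below repeatable across runs.
tf.set_random_seed(1234)

n = tf.random_normal([2, 3], mean=0.0, stddev=1.0)
u = tf.random_uniform([2, 3], minval=0.0, maxval=1.0)
s = tf.random_shuffle(tf.constant([[1, 2], [3, 4], [5, 6]]))  # shuffles along dim 0

with tf.Session() as sess:
    print(sess.run(n))
    print(sess.run(u))
    print(sess.run(s))
```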
diff --git a/tensorflow/g3doc/api_docs/python/control_flow_ops.md b/tensorflow/g3doc/api_docs/python/control_flow_ops.md
index f3245e6957..eda97dc222 100644
--- a/tensorflow/g3doc/api_docs/python/control_flow_ops.md
+++ b/tensorflow/g3doc/api_docs/python/control_flow_ops.md
@@ -9,34 +9,34 @@ accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
## Contents
### [Control Flow](#AUTOGENERATED-control-flow)
* [Control Flow Operations](#AUTOGENERATED-control-flow-operations)
- * [tf.identity(input, name=None)](#identity)
- * [tf.tuple(tensors, name=None, control_inputs=None)](#tuple)
- * [tf.group(*inputs, **kwargs)](#group)
- * [tf.no_op(name=None)](#no_op)
- * [tf.count_up_to(ref, limit, name=None)](#count_up_to)
+ * [`tf.identity(input, name=None)`](#identity)
+ * [`tf.tuple(tensors, name=None, control_inputs=None)`](#tuple)
+ * [`tf.group(*inputs, **kwargs)`](#group)
+ * [`tf.no_op(name=None)`](#no_op)
+ * [`tf.count_up_to(ref, limit, name=None)`](#count_up_to)
* [Logical Operators](#AUTOGENERATED-logical-operators)
- * [tf.logical_and(x, y, name=None)](#logical_and)
- * [tf.logical_not(x, name=None)](#logical_not)
- * [tf.logical_or(x, y, name=None)](#logical_or)
- * [tf.logical_xor(x, y, name='LogicalXor')](#logical_xor)
+ * [`tf.logical_and(x, y, name=None)`](#logical_and)
+ * [`tf.logical_not(x, name=None)`](#logical_not)
+ * [`tf.logical_or(x, y, name=None)`](#logical_or)
+ * [`tf.logical_xor(x, y, name='LogicalXor')`](#logical_xor)
* [Comparison Operators](#AUTOGENERATED-comparison-operators)
- * [tf.equal(x, y, name=None)](#equal)
- * [tf.not_equal(x, y, name=None)](#not_equal)
- * [tf.less(x, y, name=None)](#less)
- * [tf.less_equal(x, y, name=None)](#less_equal)
- * [tf.greater(x, y, name=None)](#greater)
- * [tf.greater_equal(x, y, name=None)](#greater_equal)
- * [tf.select(condition, t, e, name=None)](#select)
- * [tf.where(input, name=None)](#where)
+ * [`tf.equal(x, y, name=None)`](#equal)
+ * [`tf.not_equal(x, y, name=None)`](#not_equal)
+ * [`tf.less(x, y, name=None)`](#less)
+ * [`tf.less_equal(x, y, name=None)`](#less_equal)
+ * [`tf.greater(x, y, name=None)`](#greater)
+ * [`tf.greater_equal(x, y, name=None)`](#greater_equal)
+ * [`tf.select(condition, t, e, name=None)`](#select)
+ * [`tf.where(input, name=None)`](#where)
* [Debugging Operations](#AUTOGENERATED-debugging-operations)
- * [tf.is_finite(x, name=None)](#is_finite)
- * [tf.is_inf(x, name=None)](#is_inf)
- * [tf.is_nan(x, name=None)](#is_nan)
- * [tf.verify_tensor_all_finite(t, msg, name=None)](#verify_tensor_all_finite)
- * [tf.check_numerics(tensor, message, name=None)](#check_numerics)
- * [tf.add_check_numerics_ops()](#add_check_numerics_ops)
- * [tf.Assert(condition, data, summarize=None, name=None)](#Assert)
- * [tf.Print(input_, data, message=None, first_n=None, summarize=None, name=None)](#Print)
+ * [`tf.is_finite(x, name=None)`](#is_finite)
+ * [`tf.is_inf(x, name=None)`](#is_inf)
+ * [`tf.is_nan(x, name=None)`](#is_nan)
+ * [`tf.verify_tensor_all_finite(t, msg, name=None)`](#verify_tensor_all_finite)
+ * [`tf.check_numerics(tensor, message, name=None)`](#check_numerics)
+ * [`tf.add_check_numerics_ops()`](#add_check_numerics_ops)
+ * [`tf.Assert(condition, data, summarize=None, name=None)`](#Assert)
+ * [`tf.Print(input_, data, message=None, first_n=None, summarize=None, name=None)`](#Print)
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
@@ -48,7 +48,7 @@ the execution of operations and add conditional dependencies to your graph.
- - -
-### tf.identity(input, name=None) <a class="md-anchor" id="identity"></a>
+### `tf.identity(input, name=None)` <a class="md-anchor" id="identity"></a>
Return a tensor with the same shape and contents as the input tensor or value.
@@ -65,7 +65,7 @@ Return a tensor with the same shape and contents as the input tensor or value.
- - -
-### tf.tuple(tensors, name=None, control_inputs=None) <a class="md-anchor" id="tuple"></a>
+### `tf.tuple(tensors, name=None, control_inputs=None)` <a class="md-anchor" id="tuple"></a>
Group tensors together.
@@ -102,7 +102,7 @@ See also `group` and `with_dependencies`.
- - -
-### tf.group(*inputs, **kwargs) <a class="md-anchor" id="group"></a>
+### `tf.group(*inputs, **kwargs)` <a class="md-anchor" id="group"></a>
Create an op that groups multiple operations.
@@ -131,7 +131,7 @@ See also `tuple` and `with_dependencies`.
- - -
-### tf.no_op(name=None) <a class="md-anchor" id="no_op"></a>
+### `tf.no_op(name=None)` <a class="md-anchor" id="no_op"></a>
Does nothing. Only useful as a placeholder for control edges.
@@ -147,7 +147,7 @@ Does nothing. Only useful as a placeholder for control edges.
- - -
-### tf.count_up_to(ref, limit, name=None) <a class="md-anchor" id="count_up_to"></a>
+### `tf.count_up_to(ref, limit, name=None)` <a class="md-anchor" id="count_up_to"></a>
Increments 'ref' until it reaches 'limit'.
@@ -179,7 +179,7 @@ to your graph.
- - -
-### tf.logical_and(x, y, name=None) <a class="md-anchor" id="logical_and"></a>
+### `tf.logical_and(x, y, name=None)` <a class="md-anchor" id="logical_and"></a>
Returns the truth value of x AND y element-wise.
@@ -197,7 +197,7 @@ Returns the truth value of x AND y element-wise.
- - -
-### tf.logical_not(x, name=None) <a class="md-anchor" id="logical_not"></a>
+### `tf.logical_not(x, name=None)` <a class="md-anchor" id="logical_not"></a>
Returns the truth value of NOT x element-wise.
@@ -214,7 +214,7 @@ Returns the truth value of NOT x element-wise.
- - -
-### tf.logical_or(x, y, name=None) <a class="md-anchor" id="logical_or"></a>
+### `tf.logical_or(x, y, name=None)` <a class="md-anchor" id="logical_or"></a>
Returns the truth value of x OR y element-wise.
@@ -232,7 +232,7 @@ Returns the truth value of x OR y element-wise.
- - -
-### tf.logical_xor(x, y, name='LogicalXor') <a class="md-anchor" id="logical_xor"></a>
+### `tf.logical_xor(x, y, name='LogicalXor')` <a class="md-anchor" id="logical_xor"></a>
x ^ y = (x | y) & ~(x & y).
@@ -245,7 +245,7 @@ operators to your graph.
- - -
-### tf.equal(x, y, name=None) <a class="md-anchor" id="equal"></a>
+### `tf.equal(x, y, name=None)` <a class="md-anchor" id="equal"></a>
Returns the truth value of (x == y) element-wise.
@@ -263,7 +263,7 @@ Returns the truth value of (x == y) element-wise.
- - -
-### tf.not_equal(x, y, name=None) <a class="md-anchor" id="not_equal"></a>
+### `tf.not_equal(x, y, name=None)` <a class="md-anchor" id="not_equal"></a>
Returns the truth value of (x != y) element-wise.
@@ -281,7 +281,7 @@ Returns the truth value of (x != y) element-wise.
- - -
-### tf.less(x, y, name=None) <a class="md-anchor" id="less"></a>
+### `tf.less(x, y, name=None)` <a class="md-anchor" id="less"></a>
Returns the truth value of (x < y) element-wise.
@@ -299,7 +299,7 @@ Returns the truth value of (x < y) element-wise.
- - -
-### tf.less_equal(x, y, name=None) <a class="md-anchor" id="less_equal"></a>
+### `tf.less_equal(x, y, name=None)` <a class="md-anchor" id="less_equal"></a>
Returns the truth value of (x <= y) element-wise.
@@ -317,7 +317,7 @@ Returns the truth value of (x <= y) element-wise.
- - -
-### tf.greater(x, y, name=None) <a class="md-anchor" id="greater"></a>
+### `tf.greater(x, y, name=None)` <a class="md-anchor" id="greater"></a>
Returns the truth value of (x > y) element-wise.
@@ -335,7 +335,7 @@ Returns the truth value of (x > y) element-wise.
- - -
-### tf.greater_equal(x, y, name=None) <a class="md-anchor" id="greater_equal"></a>
+### `tf.greater_equal(x, y, name=None)` <a class="md-anchor" id="greater_equal"></a>
Returns the truth value of (x >= y) element-wise.
@@ -353,7 +353,7 @@ Returns the truth value of (x >= y) element-wise.
- - -
-### tf.select(condition, t, e, name=None) <a class="md-anchor" id="select"></a>
+### `tf.select(condition, t, e, name=None)` <a class="md-anchor" id="select"></a>
Selects elements from `t` or `e`, depending on `condition`.
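A small sketch combining a comparison operator with `tf.select` (assumed 0.x-era API; `tf.neg` is the era's elementwise negation from math_ops, and the input values are illustrative):

```python
import tensorflow as tf

x = tf.constant([-2.0, 1.0, -0.5, 3.0])
is_pos = tf.greater(x, 0.0)               # [False, True, False, True]
abs_x = tf.select(is_pos, x, tf.neg(x))   # element-wise choice -> [2.0, 1.0, 0.5, 3.0]

with tf.Session() as sess:
    print(sess.run(is_pos))
    print(sess.run(abs_x))
```

Unlike `tf.select`, the `tf.where` documented below returns the coordinates of the true entries rather than choosing between two tensors.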
@@ -391,7 +391,7 @@ select(condition, t, e) ==> [[1, 2],
- - -
-### tf.where(input, name=None) <a class="md-anchor" id="where"></a>
+### `tf.where(input, name=None)` <a class="md-anchor" id="where"></a>
Returns locations of true values in a boolean tensor.
@@ -446,7 +446,7 @@ debug your graph.
- - -
-### tf.is_finite(x, name=None) <a class="md-anchor" id="is_finite"></a>
+### `tf.is_finite(x, name=None)` <a class="md-anchor" id="is_finite"></a>
Returns which elements of x are finite.
@@ -463,7 +463,7 @@ Returns which elements of x are finite.
- - -
-### tf.is_inf(x, name=None) <a class="md-anchor" id="is_inf"></a>
+### `tf.is_inf(x, name=None)` <a class="md-anchor" id="is_inf"></a>
Returns which elements of x are Inf.
@@ -480,7 +480,7 @@ Returns which elements of x are Inf.
- - -
-### tf.is_nan(x, name=None) <a class="md-anchor" id="is_nan"></a>
+### `tf.is_nan(x, name=None)` <a class="md-anchor" id="is_nan"></a>
Returns which elements of x are NaN.
@@ -497,7 +497,7 @@ Returns which elements of x are NaN.
- - -
-### tf.verify_tensor_all_finite(t, msg, name=None) <a class="md-anchor" id="verify_tensor_all_finite"></a>
+### `tf.verify_tensor_all_finite(t, msg, name=None)` <a class="md-anchor" id="verify_tensor_all_finite"></a>
Assert that the tensor does not contain any NaN's or Inf's.
@@ -515,7 +515,7 @@ Assert that the tensor does not contain any NaN's or Inf's.
- - -
-### tf.check_numerics(tensor, message, name=None) <a class="md-anchor" id="check_numerics"></a>
+### `tf.check_numerics(tensor, message, name=None)` <a class="md-anchor" id="check_numerics"></a>
Checks a tensor for NaN and Inf values.
@@ -536,7 +536,7 @@ that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
- - -
-### tf.add_check_numerics_ops() <a class="md-anchor" id="add_check_numerics_ops"></a>
+### `tf.add_check_numerics_ops()` <a class="md-anchor" id="add_check_numerics_ops"></a>
Connect a check_numerics to every floating point tensor.
@@ -552,7 +552,7 @@ all of its (`float` or `double`) inputs is guaranteed to run before the
- - -
-### tf.Assert(condition, data, summarize=None, name=None) <a class="md-anchor" id="Assert"></a>
+### `tf.Assert(condition, data, summarize=None, name=None)` <a class="md-anchor" id="Assert"></a>
Asserts that the given condition is true.
@@ -570,7 +570,7 @@ If `condition` evaluates to false, print the list of tensors in `data`.
- - -
-### tf.Print(input_, data, message=None, first_n=None, summarize=None, name=None) <a class="md-anchor" id="Print"></a>
+### `tf.Print(input_, data, message=None, first_n=None, summarize=None, name=None)` <a class="md-anchor" id="Print"></a>
Prints a list of tensors.
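A sketch of the debugging ops `tf.Print` and `tf.Assert` used together (assumed 0.x-era API; the positivity check and the tensor values are illustrative):

```python
import tensorflow as tf

x = tf.placeholder(tf.float32)

# tf.Print is an identity op that logs the listed tensors whenever it executes.
x_logged = tf.Print(x, [x], message="value of x: ")

# tf.Assert aborts the step if the condition is false.
assert_positive = tf.Assert(tf.reduce_all(tf.greater(x, 0.0)), [x])

with tf.control_dependencies([assert_positive]):
    y = tf.identity(x_logged) * 2.0

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [1.0, 2.0]}))   # logs x, then prints [2. 4.]
```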
diff --git a/tensorflow/g3doc/api_docs/python/framework.md b/tensorflow/g3doc/api_docs/python/framework.md
index 4107a8459e..1900af97a2 100644
--- a/tensorflow/g3doc/api_docs/python/framework.md
+++ b/tensorflow/g3doc/api_docs/python/framework.md
@@ -10,26 +10,26 @@
* [class tf.Tensor](#Tensor)
* [Tensor types](#AUTOGENERATED-tensor-types)
* [class tf.DType](#DType)
- * [tf.as_dtype(type_value)](#as_dtype)
+ * [`tf.as_dtype(type_value)`](#as_dtype)
* [Utility functions](#AUTOGENERATED-utility-functions)
- * [tf.device(dev)](#device)
- * [tf.name_scope(name)](#name_scope)
- * [tf.control_dependencies(control_inputs)](#control_dependencies)
- * [tf.convert_to_tensor(value, dtype=None, name=None)](#convert_to_tensor)
- * [tf.get_default_graph()](#get_default_graph)
- * [tf.import_graph_def(graph_def, input_map=None, return_elements=None, name=None, op_dict=None)](#import_graph_def)
+ * [`tf.device(dev)`](#device)
+ * [`tf.name_scope(name)`](#name_scope)
+ * [`tf.control_dependencies(control_inputs)`](#control_dependencies)
+ * [`tf.convert_to_tensor(value, dtype=None, name=None)`](#convert_to_tensor)
+ * [`tf.get_default_graph()`](#get_default_graph)
+ * [`tf.import_graph_def(graph_def, input_map=None, return_elements=None, name=None, op_dict=None)`](#import_graph_def)
* [Graph collections](#AUTOGENERATED-graph-collections)
- * [tf.add_to_collection(name, value)](#add_to_collection)
- * [tf.get_collection(key, scope=None)](#get_collection)
+ * [`tf.add_to_collection(name, value)`](#add_to_collection)
+ * [`tf.get_collection(key, scope=None)`](#get_collection)
* [class tf.GraphKeys](#GraphKeys)
* [Defining new operations](#AUTOGENERATED-defining-new-operations)
* [class tf.RegisterGradient](#RegisterGradient)
- * [tf.NoGradient(op_type)](#NoGradient)
+ * [`tf.NoGradient(op_type)`](#NoGradient)
* [class tf.RegisterShape](#RegisterShape)
* [class tf.TensorShape](#TensorShape)
* [class tf.Dimension](#Dimension)
- * [tf.op_scope(values, name, default_name)](#op_scope)
- * [tf.get_seed(op_seed)](#get_seed)
+ * [`tf.op_scope(values, name, default_name)`](#op_scope)
+ * [`tf.get_seed(op_seed)`](#get_seed)
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
@@ -78,14 +78,14 @@ are not thread-safe.
- - -
-#### tf.Graph.__init__() <a class="md-anchor" id="Graph.__init__"></a>
+#### `tf.Graph.__init__()` <a class="md-anchor" id="Graph.__init__"></a>
Creates a new, empty Graph.
- - -
-#### tf.Graph.as_default() <a class="md-anchor" id="Graph.as_default"></a>
+#### `tf.Graph.as_default()` <a class="md-anchor" id="Graph.as_default"></a>
Returns a context manager that makes this `Graph` the default graph.
@@ -123,7 +123,7 @@ with tf.Graph().as_default() as g:
- - -
-#### tf.Graph.as_graph_def(from_version=None) <a class="md-anchor" id="Graph.as_graph_def"></a>
+#### `tf.Graph.as_graph_def(from_version=None)` <a class="md-anchor" id="Graph.as_graph_def"></a>
Returns a serialized `GraphDef` representation of this graph.
@@ -148,7 +148,7 @@ This method is thread-safe.
- - -
-#### tf.Graph.finalize() <a class="md-anchor" id="Graph.finalize"></a>
+#### `tf.Graph.finalize()` <a class="md-anchor" id="Graph.finalize"></a>
Finalizes this graph, making it read-only.
@@ -160,14 +160,14 @@ when using a [`QueueRunner`](train.md#QueueRunner).
- - -
-#### tf.Graph.finalized <a class="md-anchor" id="Graph.finalized"></a>
+#### `tf.Graph.finalized` <a class="md-anchor" id="Graph.finalized"></a>
True if this graph has been finalized.
- - -
-#### tf.Graph.control_dependencies(control_inputs) <a class="md-anchor" id="Graph.control_dependencies"></a>
+#### `tf.Graph.control_dependencies(control_inputs)` <a class="md-anchor" id="Graph.control_dependencies"></a>
Returns a context manager that specifies control dependencies.
@@ -236,7 +236,7 @@ def my_func(pred, tensor):
- - -
-#### tf.Graph.device(device_name_or_function) <a class="md-anchor" id="Graph.device"></a>
+#### `tf.Graph.device(device_name_or_function)` <a class="md-anchor" id="Graph.device"></a>
Returns a context manager that specifies the default device to use.
@@ -288,7 +288,7 @@ with g.device(matmul_on_gpu):
- - -
-#### tf.Graph.name_scope(name) <a class="md-anchor" id="Graph.name_scope"></a>
+#### `tf.Graph.name_scope(name)` <a class="md-anchor" id="Graph.name_scope"></a>
Returns a context manager that creates hierarchical names for operations.
@@ -379,7 +379,7 @@ additional collections by specifying a new name.
- - -
-#### tf.Graph.add_to_collection(name, value) <a class="md-anchor" id="Graph.add_to_collection"></a>
+#### `tf.Graph.add_to_collection(name, value)` <a class="md-anchor" id="Graph.add_to_collection"></a>
Stores `value` in the collection with the given `name`.
@@ -393,7 +393,7 @@ Stores `value` in the collection with the given `name`.
- - -
-#### tf.Graph.get_collection(name, scope=None) <a class="md-anchor" id="Graph.get_collection"></a>
+#### `tf.Graph.get_collection(name, scope=None)` <a class="md-anchor" id="Graph.get_collection"></a>
Returns a list of values in the collection with the given `name`.
@@ -416,7 +416,7 @@ Returns a list of values in the collection with the given `name`.
- - -
-#### tf.Graph.as_graph_element(obj, allow_tensor=True, allow_operation=True) <a class="md-anchor" id="Graph.as_graph_element"></a>
+#### `tf.Graph.as_graph_element(obj, allow_tensor=True, allow_operation=True)` <a class="md-anchor" id="Graph.as_graph_element"></a>
Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
@@ -454,7 +454,7 @@ This method may be called concurrently from multiple threads.
- - -
-#### tf.Graph.get_operation_by_name(name) <a class="md-anchor" id="Graph.get_operation_by_name"></a>
+#### `tf.Graph.get_operation_by_name(name)` <a class="md-anchor" id="Graph.get_operation_by_name"></a>
Returns the `Operation` with the given `name`.
@@ -478,7 +478,7 @@ This method may be called concurrently from multiple threads.
- - -
-#### tf.Graph.get_tensor_by_name(name) <a class="md-anchor" id="Graph.get_tensor_by_name"></a>
+#### `tf.Graph.get_tensor_by_name(name)` <a class="md-anchor" id="Graph.get_tensor_by_name"></a>
Returns the `Tensor` with the given `name`.
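A sketch of looking up graph elements by name, following the lookup rules documented above (assumed 0.x-era API; the graph and node names are made up):

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    a = tf.constant(1.0, name="a")
    b = tf.constant(2.0, name="b")
    c = tf.add(a, b, name="c")

add_op = g.get_operation_by_name("c")     # operations are addressed by plain name
c_tensor = g.get_tensor_by_name("c:0")    # tensors by "name:output_index"

with tf.Session(graph=g) as sess:
    print(sess.run(c_tensor))   # 3.0
```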
@@ -502,7 +502,7 @@ This method may be called concurrently from multiple threads.
- - -
-#### tf.Graph.get_operations() <a class="md-anchor" id="Graph.get_operations"></a>
+#### `tf.Graph.get_operations()` <a class="md-anchor" id="Graph.get_operations"></a>
Return the list of operations in the graph.
@@ -520,7 +520,7 @@ This method may be called concurrently from multiple threads.
- - -
-#### tf.Graph.get_default_device() <a class="md-anchor" id="Graph.get_default_device"></a>
+#### `tf.Graph.get_default_device()` <a class="md-anchor" id="Graph.get_default_device"></a>
Returns the default device.
@@ -531,13 +531,13 @@ Returns the default device.
- - -
-#### tf.Graph.seed <a class="md-anchor" id="Graph.seed"></a>
+#### `tf.Graph.seed` <a class="md-anchor" id="Graph.seed"></a>
- - -
-#### tf.Graph.unique_name(name) <a class="md-anchor" id="Graph.unique_name"></a>
+#### `tf.Graph.unique_name(name)` <a class="md-anchor" id="Graph.unique_name"></a>
Return a unique Operation name for "name".
@@ -563,14 +563,14 @@ and in various visualization tools such as TensorBoard.
- - -
-#### tf.Graph.version <a class="md-anchor" id="Graph.version"></a>
+#### `tf.Graph.version` <a class="md-anchor" id="Graph.version"></a>
Returns a version number that increases as ops are added to the graph.
- - -
-#### tf.Graph.create_op(op_type, inputs, dtypes, input_types=None, name=None, attrs=None, op_def=None, compute_shapes=True) <a class="md-anchor" id="Graph.create_op"></a>
+#### `tf.Graph.create_op(op_type, inputs, dtypes, input_types=None, name=None, attrs=None, op_def=None, compute_shapes=True)` <a class="md-anchor" id="Graph.create_op"></a>
Creates an `Operation` in this graph.
@@ -612,7 +612,7 @@ the default graph.
- - -
-#### tf.Graph.gradient_override_map(op_type_map) <a class="md-anchor" id="Graph.gradient_override_map"></a>
+#### `tf.Graph.gradient_override_map(op_type_map)` <a class="md-anchor" id="Graph.gradient_override_map"></a>
EXPERIMENTAL: A context manager for overriding gradient functions.
@@ -675,25 +675,25 @@ be executed by passing it to [`Session.run()`](client.md#Session.run).
- - -
-#### tf.Operation.name <a class="md-anchor" id="Operation.name"></a>
+#### `tf.Operation.name` <a class="md-anchor" id="Operation.name"></a>
The full name of this operation.
- - -
-#### tf.Operation.type <a class="md-anchor" id="Operation.type"></a>
+#### `tf.Operation.type` <a class="md-anchor" id="Operation.type"></a>
The type of the op (e.g. `"MatMul"`).
- - -
-#### tf.Operation.inputs <a class="md-anchor" id="Operation.inputs"></a>
+#### `tf.Operation.inputs` <a class="md-anchor" id="Operation.inputs"></a>
The list of `Tensor` objects representing the data inputs of this op.
- - -
-#### tf.Operation.control_inputs <a class="md-anchor" id="Operation.control_inputs"></a>
+#### `tf.Operation.control_inputs` <a class="md-anchor" id="Operation.control_inputs"></a>
The `Operation` objects on which this op has a control dependency.
@@ -709,13 +709,13 @@ in the correct order.
- - -
-#### tf.Operation.outputs <a class="md-anchor" id="Operation.outputs"></a>
+#### `tf.Operation.outputs` <a class="md-anchor" id="Operation.outputs"></a>
The list of `Tensor` objects representing the outputs of this op.
- - -
-#### tf.Operation.device <a class="md-anchor" id="Operation.device"></a>
+#### `tf.Operation.device` <a class="md-anchor" id="Operation.device"></a>
The name of the device to which this op has been assigned, if any.
@@ -726,14 +726,14 @@ The name of the device to which this op has been assigned, if any.
- - -
-#### tf.Operation.graph <a class="md-anchor" id="Operation.graph"></a>
+#### `tf.Operation.graph` <a class="md-anchor" id="Operation.graph"></a>
The `Graph` that contains this operation.
- - -
-#### tf.Operation.run(feed_dict=None, session=None) <a class="md-anchor" id="Operation.run"></a>
+#### `tf.Operation.run(feed_dict=None, session=None)` <a class="md-anchor" id="Operation.run"></a>
Runs this operation in a `Session`.
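
As a minimal sketch (assuming the variable-initialization op available in this release, `tf.initialize_all_variables()`), `Operation.run()` is shorthand for `sess.run(op)` when a default session is installed:

```
import tensorflow as tf

with tf.Graph().as_default():
    counter = tf.Variable(0, name="counter")
    init = tf.initialize_all_variables()
    with tf.Session() as sess:
        # Inside the `with` block, `sess` is the default session, so the
        # op can be run directly instead of via sess.run(init).
        init.run()
        print(sess.run(counter))  # 0
```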
@@ -757,7 +757,7 @@ available, or `session` must be specified explicitly.
- - -
-#### tf.Operation.get_attr(name) <a class="md-anchor" id="Operation.get_attr"></a>
+#### `tf.Operation.get_attr(name)` <a class="md-anchor" id="Operation.get_attr"></a>
Returns the value of the attr of this op with the given `name`.
@@ -778,7 +778,7 @@ Returns the value of the attr of this op with the given `name`.
- - -
-#### tf.Operation.traceback <a class="md-anchor" id="Operation.traceback"></a>
+#### `tf.Operation.traceback` <a class="md-anchor" id="Operation.traceback"></a>
Returns the call stack from when this operation was constructed.
@@ -786,7 +786,7 @@ Returns the call stack from when this operation was constructed.
#### Other Methods <a class="md-anchor" id="AUTOGENERATED-other-methods"></a>
- - -
-#### tf.Operation.__init__(node_def, g, inputs=None, output_types=None, control_inputs=None, input_types=None, original_op=None, op_def=None) <a class="md-anchor" id="Operation.__init__"></a>
+#### `tf.Operation.__init__(node_def, g, inputs=None, output_types=None, control_inputs=None, input_types=None, original_op=None, op_def=None)` <a class="md-anchor" id="Operation.__init__"></a>
Creates an `Operation`.
@@ -833,7 +833,7 @@ regular expression:
- - -
-#### tf.Operation.node_def <a class="md-anchor" id="Operation.node_def"></a>
+#### `tf.Operation.node_def` <a class="md-anchor" id="Operation.node_def"></a>
Returns a serialized `NodeDef` representation of this operation.
@@ -845,7 +845,7 @@ Returns a serialized `NodeDef` representation of this operation.
- - -
-#### tf.Operation.op_def <a class="md-anchor" id="Operation.op_def"></a>
+#### `tf.Operation.op_def` <a class="md-anchor" id="Operation.op_def"></a>
Returns the `OpDef` proto that represents the type of this op.
@@ -857,7 +857,7 @@ Returns the `OpDef` proto that represents the type of this op.
- - -
-#### tf.Operation.values() <a class="md-anchor" id="Operation.values"></a>
+#### `tf.Operation.values()` <a class="md-anchor" id="Operation.values"></a>
DEPRECATED: Use outputs.
@@ -906,37 +906,37 @@ result = sess.run(e)
- - -
-#### tf.Tensor.dtype <a class="md-anchor" id="Tensor.dtype"></a>
+#### `tf.Tensor.dtype` <a class="md-anchor" id="Tensor.dtype"></a>
The `DType` of elements in this tensor.
- - -
-#### tf.Tensor.name <a class="md-anchor" id="Tensor.name"></a>
+#### `tf.Tensor.name` <a class="md-anchor" id="Tensor.name"></a>
The string name of this tensor.
- - -
-#### tf.Tensor.value_index <a class="md-anchor" id="Tensor.value_index"></a>
+#### `tf.Tensor.value_index` <a class="md-anchor" id="Tensor.value_index"></a>
The index of this tensor in the outputs of its `Operation`.
- - -
-#### tf.Tensor.graph <a class="md-anchor" id="Tensor.graph"></a>
+#### `tf.Tensor.graph` <a class="md-anchor" id="Tensor.graph"></a>
The `Graph` that contains this tensor.
- - -
-#### tf.Tensor.op <a class="md-anchor" id="Tensor.op"></a>
+#### `tf.Tensor.op` <a class="md-anchor" id="Tensor.op"></a>
The `Operation` that produces this tensor as an output.
- - -
-#### tf.Tensor.consumers() <a class="md-anchor" id="Tensor.consumers"></a>
+#### `tf.Tensor.consumers()` <a class="md-anchor" id="Tensor.consumers"></a>
Returns a list of `Operation`s that consume this tensor.
@@ -948,7 +948,7 @@ Returns a list of `Operation`s that consume this tensor.
- - -
-#### tf.Tensor.eval(feed_dict=None, session=None) <a class="md-anchor" id="Tensor.eval"></a>
+#### `tf.Tensor.eval(feed_dict=None, session=None)` <a class="md-anchor" id="Tensor.eval"></a>
Evaluates this tensor in a `Session`.
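
A minimal sketch: with a default session installed, `Tensor.eval()` is shorthand for running the tensor in that session.

```
import tensorflow as tf

c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.matmul(c, c)
with tf.Session():
    # Equivalent to sess.run(d) with the default session active.
    print(d.eval())
```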
@@ -977,7 +977,7 @@ available, or `session` must be specified explicitly.
- - -
-#### tf.Tensor.get_shape() <a class="md-anchor" id="Tensor.get_shape"></a>
+#### `tf.Tensor.get_shape()` <a class="md-anchor" id="Tensor.get_shape"></a>
Returns the `TensorShape` that represents the shape of this tensor.
@@ -1024,7 +1024,7 @@ inferred shape.
- - -
-#### tf.Tensor.set_shape(shape) <a class="md-anchor" id="Tensor.set_shape"></a>
+#### `tf.Tensor.set_shape(shape)` <a class="md-anchor" id="Tensor.set_shape"></a>
Updates the shape of this tensor.
@@ -1065,7 +1065,7 @@ print image.get_shape()
#### Other Methods <a class="md-anchor" id="AUTOGENERATED-other-methods"></a>
- - -
-#### tf.Tensor.__init__(op, value_index, dtype) <a class="md-anchor" id="Tensor.__init__"></a>
+#### `tf.Tensor.__init__(op, value_index, dtype)` <a class="md-anchor" id="Tensor.__init__"></a>
Creates a new `Tensor`.
@@ -1085,7 +1085,7 @@ Creates a new `Tensor`.
- - -
-#### tf.Tensor.device <a class="md-anchor" id="Tensor.device"></a>
+#### `tf.Tensor.device` <a class="md-anchor" id="Tensor.device"></a>
The name of the device on which this tensor will be produced, or None.
@@ -1127,7 +1127,7 @@ names to a `DType` object.
- - -
-#### tf.DType.is_compatible_with(other) <a class="md-anchor" id="DType.is_compatible_with"></a>
+#### `tf.DType.is_compatible_with(other)` <a class="md-anchor" id="DType.is_compatible_with"></a>
Returns True if the `other` DType will be converted to this DType.
@@ -1153,50 +1153,50 @@ DType(T).as_ref.is_compatible_with(DType(T).as_ref) == True
- - -
-#### tf.DType.name <a class="md-anchor" id="DType.name"></a>
+#### `tf.DType.name` <a class="md-anchor" id="DType.name"></a>
Returns the string name for this `DType`.
- - -
-#### tf.DType.base_dtype <a class="md-anchor" id="DType.base_dtype"></a>
+#### `tf.DType.base_dtype` <a class="md-anchor" id="DType.base_dtype"></a>
Returns a non-reference `DType` based on this `DType`.
- - -
-#### tf.DType.is_ref_dtype <a class="md-anchor" id="DType.is_ref_dtype"></a>
+#### `tf.DType.is_ref_dtype` <a class="md-anchor" id="DType.is_ref_dtype"></a>
Returns `True` if this `DType` represents a reference type.
- - -
-#### tf.DType.as_ref <a class="md-anchor" id="DType.as_ref"></a>
+#### `tf.DType.as_ref` <a class="md-anchor" id="DType.as_ref"></a>
Returns a reference `DType` based on this `DType`.
- - -
-#### tf.DType.is_integer <a class="md-anchor" id="DType.is_integer"></a>
+#### `tf.DType.is_integer` <a class="md-anchor" id="DType.is_integer"></a>
Returns whether this is a (non-quantized) integer type.
- - -
-#### tf.DType.is_quantized <a class="md-anchor" id="DType.is_quantized"></a>
+#### `tf.DType.is_quantized` <a class="md-anchor" id="DType.is_quantized"></a>
Returns whether this is a quantized data type.
- - -
-#### tf.DType.as_numpy_dtype <a class="md-anchor" id="DType.as_numpy_dtype"></a>
+#### `tf.DType.as_numpy_dtype` <a class="md-anchor" id="DType.as_numpy_dtype"></a>
Returns a `numpy.dtype` based on this `DType`.
- - -
-#### tf.DType.as_datatype_enum <a class="md-anchor" id="DType.as_datatype_enum"></a>
+#### `tf.DType.as_datatype_enum` <a class="md-anchor" id="DType.as_datatype_enum"></a>
Returns a `types_pb2.DataType` enum value based on this `DType`.
@@ -1204,7 +1204,7 @@ Returns a `types_pb2.DataType` enum value based on this `DType`.
#### Other Methods <a class="md-anchor" id="AUTOGENERATED-other-methods"></a>
- - -
-#### tf.DType.__init__(type_enum) <a class="md-anchor" id="DType.__init__"></a>
+#### `tf.DType.__init__(type_enum)` <a class="md-anchor" id="DType.__init__"></a>
Creates a new `DataType`.
@@ -1225,7 +1225,7 @@ types.as_dtype() function.
- - -
-#### tf.DType.max <a class="md-anchor" id="DType.max"></a>
+#### `tf.DType.max` <a class="md-anchor" id="DType.max"></a>
Returns the maximum representable value in this data type.
@@ -1236,7 +1236,7 @@ Returns the maximum representable value in this data type.
- - -
-#### tf.DType.min <a class="md-anchor" id="DType.min"></a>
+#### `tf.DType.min` <a class="md-anchor" id="DType.min"></a>
Returns the minimum representable value in this data type.
@@ -1248,7 +1248,7 @@ Returns the minimum representable value in this data type.
- - -
-### tf.as_dtype(type_value) <a class="md-anchor" id="as_dtype"></a>
+### `tf.as_dtype(type_value)` <a class="md-anchor" id="as_dtype"></a>
Converts the given `type_value` to a `DType`.
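
For illustration, a few of the accepted `type_value` forms (string names, NumPy types, and existing `DType` objects):

```
import numpy as np
import tensorflow as tf

print(tf.as_dtype("float32"))   # from a type-name string
print(tf.as_dtype(np.int64))    # from a NumPy type
print(tf.as_dtype(tf.float64))  # an existing DType passes through unchanged
```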
@@ -1275,7 +1275,7 @@ Converts the given `type_value` to a `DType`.
- - -
-### tf.device(dev) <a class="md-anchor" id="device"></a>
+### `tf.device(dev)` <a class="md-anchor" id="device"></a>
Wrapper for `Graph.device()` using the default graph.
@@ -1295,7 +1295,7 @@ See [`Graph.name_scope()`](framework.md#Graph.name_scope) for more details.
- - -
-### tf.name_scope(name) <a class="md-anchor" id="name_scope"></a>
+### `tf.name_scope(name)` <a class="md-anchor" id="name_scope"></a>
Wrapper for `Graph.name_scope()` using the default graph.
@@ -1314,7 +1314,7 @@ See [`Graph.name_scope()`](framework.md#Graph.name_scope) for more details.
- - -
-### tf.control_dependencies(control_inputs) <a class="md-anchor" id="control_dependencies"></a>
+### `tf.control_dependencies(control_inputs)` <a class="md-anchor" id="control_dependencies"></a>
Wrapper for `Graph.control_dependencies()` using the default graph.
@@ -1336,7 +1336,7 @@ for more details.
- - -
-### tf.convert_to_tensor(value, dtype=None, name=None) <a class="md-anchor" id="convert_to_tensor"></a>
+### `tf.convert_to_tensor(value, dtype=None, name=None)` <a class="md-anchor" id="convert_to_tensor"></a>
Converts the given `value` to a `Tensor`.
@@ -1353,9 +1353,9 @@ def my_func(arg):
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
-value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]))
+value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
-value_3 = my_func(numpy.array([[1.0, 2.0], [3.0, 4.0]], dtype=numpy.float32))
+value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
@@ -1385,7 +1385,7 @@ and scalars in addition to `Tensor` objects.
- - -
-### tf.get_default_graph() <a class="md-anchor" id="get_default_graph"></a>
+### `tf.get_default_graph()` <a class="md-anchor" id="get_default_graph"></a>
Returns the default graph for the current thread.
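
A short sketch contrasting the process-wide default graph with one installed via `Graph.as_default()`:

```
import tensorflow as tf

c = tf.constant(4.0)
assert c.graph is tf.get_default_graph()

g = tf.Graph()
with g.as_default():
    d = tf.constant(5.0)
    # Inside the `with` block, `g` is the default graph.
    assert d.graph is g
    assert tf.get_default_graph() is g
```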
@@ -1405,7 +1405,7 @@ thread's function.
- - -
-### tf.import_graph_def(graph_def, input_map=None, return_elements=None, name=None, op_dict=None) <a class="md-anchor" id="import_graph_def"></a>
+### `tf.import_graph_def(graph_def, input_map=None, return_elements=None, name=None, op_dict=None)` <a class="md-anchor" id="import_graph_def"></a>
Imports the TensorFlow graph in `graph_def` into the Python `Graph`.
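
A minimal sketch of the round trip (serialize, then re-import while remapping one input and fetching one output); the op names `"a"` and `"b"` are arbitrary, and `tf.mul` is the multiplication op name in this release:

```
import tensorflow as tf

# Build a small graph and serialize it to a GraphDef.
with tf.Graph().as_default():
    a = tf.constant(3.0, name="a")
    b = tf.mul(a, 2.0, name="b")
    graph_def = tf.get_default_graph().as_graph_def()

# Import it into a fresh graph, remapping "a" and fetching "b".
with tf.Graph().as_default():
    new_a = tf.constant(10.0, name="new_a")
    b_out, = tf.import_graph_def(graph_def,
                                 input_map={"a:0": new_a},
                                 return_elements=["b:0"])
    with tf.Session() as sess:
        print(sess.run(b_out))  # 20.0
```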
@@ -1454,7 +1454,7 @@ protocol buffer, and extract individual objects in the `GraphDef` as
- - -
-### tf.add_to_collection(name, value) <a class="md-anchor" id="add_to_collection"></a>
+### `tf.add_to_collection(name, value)` <a class="md-anchor" id="add_to_collection"></a>
Wrapper for `Graph.add_to_collection()` using the default graph.
@@ -1471,7 +1471,7 @@ for more details.
- - -
-### tf.get_collection(key, scope=None) <a class="md-anchor" id="get_collection"></a>
+### `tf.get_collection(key, scope=None)` <a class="md-anchor" id="get_collection"></a>
Wrapper for `Graph.get_collection()` using the default graph.
@@ -1555,7 +1555,7 @@ that defines the operation.
- - -
-#### tf.RegisterGradient.__init__(op_type) <a class="md-anchor" id="RegisterGradient.__init__"></a>
+#### `tf.RegisterGradient.__init__(op_type)` <a class="md-anchor" id="RegisterGradient.__init__"></a>
Creates a new decorator with `op_type` as the Operation type.
@@ -1569,7 +1569,7 @@ Creates a new decorator with `op_type` as the Operation type.
- - -
-### tf.NoGradient(op_type) <a class="md-anchor" id="NoGradient"></a>
+### `tf.NoGradient(op_type)` <a class="md-anchor" id="NoGradient"></a>
Specifies that ops of type `op_type` do not have a defined gradient.
@@ -1619,7 +1619,7 @@ operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
- - -
-#### tf.RegisterShape.__init__(op_type) <a class="md-anchor" id="RegisterShape.__init__"></a>
+#### `tf.RegisterShape.__init__(op_type)` <a class="md-anchor" id="RegisterShape.__init__"></a>
Saves the "op_type" as the Operation type.
@@ -1650,7 +1650,7 @@ explicitly using [`Tensor.set_shape()`](framework.md#Tensor.set_shape).
- - -
-#### tf.TensorShape.merge_with(other) <a class="md-anchor" id="TensorShape.merge_with"></a>
+#### `tf.TensorShape.merge_with(other)` <a class="md-anchor" id="TensorShape.merge_with"></a>
Returns a `TensorShape` combining the information in `self` and `other`.
@@ -1675,7 +1675,7 @@ according to the rules defined for `Dimension.merge_with()`.
- - -
-#### tf.TensorShape.concatenate(other) <a class="md-anchor" id="TensorShape.concatenate"></a>
+#### `tf.TensorShape.concatenate(other)` <a class="md-anchor" id="TensorShape.concatenate"></a>
Returns the concatenation of the dimensions in `self` and `other`.
@@ -1698,26 +1698,26 @@ information for use with slicing.
- - -
-#### tf.TensorShape.ndims <a class="md-anchor" id="TensorShape.ndims"></a>
+#### `tf.TensorShape.ndims` <a class="md-anchor" id="TensorShape.ndims"></a>
Returns the rank of this shape, or None if it is unspecified.
- - -
-#### tf.TensorShape.dims <a class="md-anchor" id="TensorShape.dims"></a>
+#### `tf.TensorShape.dims` <a class="md-anchor" id="TensorShape.dims"></a>
Returns a list of Dimensions, or None if the shape is unspecified.
- - -
-#### tf.TensorShape.as_list() <a class="md-anchor" id="TensorShape.as_list"></a>
+#### `tf.TensorShape.as_list()` <a class="md-anchor" id="TensorShape.as_list"></a>
Returns a list of integers or None for each dimension.
- - -
-#### tf.TensorShape.is_compatible_with(other) <a class="md-anchor" id="TensorShape.is_compatible_with"></a>
+#### `tf.TensorShape.is_compatible_with(other)` <a class="md-anchor" id="TensorShape.is_compatible_with"></a>
Returns True iff `self` is compatible with `other`.
@@ -1761,7 +1761,7 @@ TensorShape([4, 4]).
- - -
-#### tf.TensorShape.is_fully_defined() <a class="md-anchor" id="TensorShape.is_fully_defined"></a>
+#### `tf.TensorShape.is_fully_defined()` <a class="md-anchor" id="TensorShape.is_fully_defined"></a>
Returns True iff `self` is fully defined in every dimension.
@@ -1769,7 +1769,7 @@ Returns True iff `self` is fully defined in every dimension.
- - -
-#### tf.TensorShape.with_rank(rank) <a class="md-anchor" id="TensorShape.with_rank"></a>
+#### `tf.TensorShape.with_rank(rank)` <a class="md-anchor" id="TensorShape.with_rank"></a>
Returns a shape based on `self` with the given rank.
@@ -1793,7 +1793,7 @@ known rank.
- - -
-#### tf.TensorShape.with_rank_at_least(rank) <a class="md-anchor" id="TensorShape.with_rank_at_least"></a>
+#### `tf.TensorShape.with_rank_at_least(rank)` <a class="md-anchor" id="TensorShape.with_rank_at_least"></a>
Returns a shape based on `self` with at least the given rank.
@@ -1816,7 +1816,7 @@ Returns a shape based on `self` with at least the given rank.
- - -
-#### tf.TensorShape.with_rank_at_most(rank) <a class="md-anchor" id="TensorShape.with_rank_at_most"></a>
+#### `tf.TensorShape.with_rank_at_most(rank)` <a class="md-anchor" id="TensorShape.with_rank_at_most"></a>
Returns a shape based on `self` with at most the given rank.
@@ -1840,7 +1840,7 @@ Returns a shape based on `self` with at most the given rank.
- - -
-#### tf.TensorShape.assert_has_rank(rank) <a class="md-anchor" id="TensorShape.assert_has_rank"></a>
+#### `tf.TensorShape.assert_has_rank(rank)` <a class="md-anchor" id="TensorShape.assert_has_rank"></a>
Raises an exception if `self` is not compatible with the given `rank`.
@@ -1857,7 +1857,7 @@ Raises an exception if `self` is not compatible with the given `rank`.
- - -
-#### tf.TensorShape.assert_same_rank(other) <a class="md-anchor" id="TensorShape.assert_same_rank"></a>
+#### `tf.TensorShape.assert_same_rank(other)` <a class="md-anchor" id="TensorShape.assert_same_rank"></a>
Raises an exception if `self` and `other` do not have compatible ranks.
@@ -1875,7 +1875,7 @@ Raises an exception if `self` and `other` do not have compatible ranks.
- - -
-#### tf.TensorShape.assert_is_compatible_with(other) <a class="md-anchor" id="TensorShape.assert_is_compatible_with"></a>
+#### `tf.TensorShape.assert_is_compatible_with(other)` <a class="md-anchor" id="TensorShape.assert_is_compatible_with"></a>
Raises an exception if `self` and `other` do not represent the same shape.
@@ -1895,7 +1895,7 @@ This method can be used to assert that there exists a shape that both
- - -
-#### tf.TensorShape.assert_is_fully_defined() <a class="md-anchor" id="TensorShape.assert_is_fully_defined"></a>
+#### `tf.TensorShape.assert_is_fully_defined()` <a class="md-anchor" id="TensorShape.assert_is_fully_defined"></a>
Raises an exception if `self` is not fully defined in every dimension.
@@ -1909,7 +1909,7 @@ Raises an exception if `self` is not fully defined in every dimension.
#### Other Methods <a class="md-anchor" id="AUTOGENERATED-other-methods"></a>
- - -
-#### tf.TensorShape.__init__(dims) <a class="md-anchor" id="TensorShape.__init__"></a>
+#### `tf.TensorShape.__init__(dims)` <a class="md-anchor" id="TensorShape.__init__"></a>
Creates a new TensorShape with the given dimensions.
@@ -1922,14 +1922,14 @@ Creates a new TensorShape with the given dimensions.
- - -
-#### tf.TensorShape.as_dimension_list() <a class="md-anchor" id="TensorShape.as_dimension_list"></a>
+#### `tf.TensorShape.as_dimension_list()` <a class="md-anchor" id="TensorShape.as_dimension_list"></a>
DEPRECATED: use as_list().
- - -
-#### tf.TensorShape.num_elements() <a class="md-anchor" id="TensorShape.num_elements"></a>
+#### `tf.TensorShape.num_elements()` <a class="md-anchor" id="TensorShape.num_elements"></a>
Returns the total number of elements, or None for incomplete shapes.
@@ -1942,14 +1942,14 @@ Returns the total number of elements, or none for incomplete shapes.
Represents the value of one dimension in a TensorShape.
- - -
-#### tf.Dimension.__init__(value) <a class="md-anchor" id="Dimension.__init__"></a>
+#### `tf.Dimension.__init__(value)` <a class="md-anchor" id="Dimension.__init__"></a>
Creates a new Dimension with the given value.
- - -
-#### tf.Dimension.assert_is_compatible_with(other) <a class="md-anchor" id="Dimension.assert_is_compatible_with"></a>
+#### `tf.Dimension.assert_is_compatible_with(other)` <a class="md-anchor" id="Dimension.assert_is_compatible_with"></a>
Raises an exception if `other` is not compatible with this Dimension.
@@ -1967,7 +1967,7 @@ Raises an exception if `other` is not compatible with this Dimension.
- - -
-#### tf.Dimension.is_compatible_with(other) <a class="md-anchor" id="Dimension.is_compatible_with"></a>
+#### `tf.Dimension.is_compatible_with(other)` <a class="md-anchor" id="Dimension.is_compatible_with"></a>
Returns true if `other` is compatible with this Dimension.
@@ -1986,7 +1986,7 @@ An unknown Dimension is compatible with all other Dimensions.
- - -
-#### tf.Dimension.merge_with(other) <a class="md-anchor" id="Dimension.merge_with"></a>
+#### `tf.Dimension.merge_with(other)` <a class="md-anchor" id="Dimension.merge_with"></a>
Returns a Dimension that combines the information in `self` and `other`.
@@ -2017,14 +2017,14 @@ Dimensions are combined as follows:
- - -
-#### tf.Dimension.value <a class="md-anchor" id="Dimension.value"></a>
+#### `tf.Dimension.value` <a class="md-anchor" id="Dimension.value"></a>
The value of this dimension, or None if it is unknown.
- - -
-### tf.op_scope(values, name, default_name) <a class="md-anchor" id="op_scope"></a>
+### `tf.op_scope(values, name, default_name)` <a class="md-anchor" id="op_scope"></a>
Returns a context manager for use when defining a Python op.
@@ -2058,7 +2058,7 @@ def my_op(a, b, c, name=None):
- - -
-### tf.get_seed(op_seed) <a class="md-anchor" id="get_seed"></a>
+### `tf.get_seed(op_seed)` <a class="md-anchor" id="get_seed"></a>
Returns the local seeds an operation should use given an op-specific seed.
diff --git a/tensorflow/g3doc/api_docs/python/image.md b/tensorflow/g3doc/api_docs/python/image.md
index 735ceaf0dd..c631af9c97 100644
--- a/tensorflow/g3doc/api_docs/python/image.md
+++ b/tensorflow/g3doc/api_docs/python/image.md
@@ -9,34 +9,34 @@ accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
## Contents
### [Images](#AUTOGENERATED-images)
* [Encoding and Decoding](#AUTOGENERATED-encoding-and-decoding)
- * [tf.image.decode_jpeg(contents, channels=None, ratio=None, fancy_upscaling=None, try_recover_truncated=None, acceptable_fraction=None, name=None)](#decode_jpeg)
- * [tf.image.encode_jpeg(image, format=None, quality=None, progressive=None, optimize_size=None, chroma_downsampling=None, density_unit=None, x_density=None, y_density=None, xmp_metadata=None, name=None)](#encode_jpeg)
- * [tf.image.decode_png(contents, channels=None, name=None)](#decode_png)
- * [tf.image.encode_png(image, compression=None, name=None)](#encode_png)
+ * [`tf.image.decode_jpeg(contents, channels=None, ratio=None, fancy_upscaling=None, try_recover_truncated=None, acceptable_fraction=None, name=None)`](#decode_jpeg)
+ * [`tf.image.encode_jpeg(image, format=None, quality=None, progressive=None, optimize_size=None, chroma_downsampling=None, density_unit=None, x_density=None, y_density=None, xmp_metadata=None, name=None)`](#encode_jpeg)
+ * [`tf.image.decode_png(contents, channels=None, name=None)`](#decode_png)
+ * [`tf.image.encode_png(image, compression=None, name=None)`](#encode_png)
* [Resizing](#AUTOGENERATED-resizing)
- * [tf.image.resize_images(images, new_height, new_width, method=0)](#resize_images)
- * [tf.image.resize_area(images, size, name=None)](#resize_area)
- * [tf.image.resize_bicubic(images, size, name=None)](#resize_bicubic)
- * [tf.image.resize_bilinear(images, size, name=None)](#resize_bilinear)
- * [tf.image.resize_nearest_neighbor(images, size, name=None)](#resize_nearest_neighbor)
+ * [`tf.image.resize_images(images, new_height, new_width, method=0)`](#resize_images)
+ * [`tf.image.resize_area(images, size, name=None)`](#resize_area)
+ * [`tf.image.resize_bicubic(images, size, name=None)`](#resize_bicubic)
+ * [`tf.image.resize_bilinear(images, size, name=None)`](#resize_bilinear)
+ * [`tf.image.resize_nearest_neighbor(images, size, name=None)`](#resize_nearest_neighbor)
* [Cropping](#AUTOGENERATED-cropping)
- * [tf.image.resize_image_with_crop_or_pad(image, target_height, target_width)](#resize_image_with_crop_or_pad)
- * [tf.image.pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width)](#pad_to_bounding_box)
- * [tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width)](#crop_to_bounding_box)
- * [tf.image.random_crop(image, size, seed=None, name=None)](#random_crop)
- * [tf.image.extract_glimpse(input, size, offsets, centered=None, normalized=None, uniform_noise=None, name=None)](#extract_glimpse)
+ * [`tf.image.resize_image_with_crop_or_pad(image, target_height, target_width)`](#resize_image_with_crop_or_pad)
+ * [`tf.image.pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width)`](#pad_to_bounding_box)
+ * [`tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width)`](#crop_to_bounding_box)
+ * [`tf.image.random_crop(image, size, seed=None, name=None)`](#random_crop)
+ * [`tf.image.extract_glimpse(input, size, offsets, centered=None, normalized=None, uniform_noise=None, name=None)`](#extract_glimpse)
* [Flipping and Transposing](#AUTOGENERATED-flipping-and-transposing)
- * [tf.image.flip_up_down(image)](#flip_up_down)
- * [tf.image.random_flip_up_down(image, seed=None)](#random_flip_up_down)
- * [tf.image.flip_left_right(image)](#flip_left_right)
- * [tf.image.random_flip_left_right(image, seed=None)](#random_flip_left_right)
- * [tf.image.transpose_image(image)](#transpose_image)
+ * [`tf.image.flip_up_down(image)`](#flip_up_down)
+ * [`tf.image.random_flip_up_down(image, seed=None)`](#random_flip_up_down)
+ * [`tf.image.flip_left_right(image)`](#flip_left_right)
+ * [`tf.image.random_flip_left_right(image, seed=None)`](#random_flip_left_right)
+ * [`tf.image.transpose_image(image)`](#transpose_image)
* [Image Adjustments](#AUTOGENERATED-image-adjustments)
- * [tf.image.adjust_brightness(image, delta, min_value=None, max_value=None)](#adjust_brightness)
- * [tf.image.random_brightness(image, max_delta, seed=None)](#random_brightness)
- * [tf.image.adjust_contrast(images, contrast_factor, min_value=None, max_value=None)](#adjust_contrast)
- * [tf.image.random_contrast(image, lower, upper, seed=None)](#random_contrast)
- * [tf.image.per_image_whitening(image)](#per_image_whitening)
+ * [`tf.image.adjust_brightness(image, delta, min_value=None, max_value=None)`](#adjust_brightness)
+ * [`tf.image.random_brightness(image, max_delta, seed=None)`](#random_brightness)
+ * [`tf.image.adjust_contrast(images, contrast_factor, min_value=None, max_value=None)`](#adjust_contrast)
+ * [`tf.image.random_contrast(image, lower, upper, seed=None)`](#random_contrast)
+ * [`tf.image.per_image_whitening(image)`](#per_image_whitening)
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
@@ -56,7 +56,7 @@ presently only support RGB, HSV, and GrayScale.
- - -
-### tf.image.decode_jpeg(contents, channels=None, ratio=None, fancy_upscaling=None, try_recover_truncated=None, acceptable_fraction=None, name=None) <a class="md-anchor" id="decode_jpeg"></a>
+### `tf.image.decode_jpeg(contents, channels=None, ratio=None, fancy_upscaling=None, try_recover_truncated=None, acceptable_fraction=None, name=None)` <a class="md-anchor" id="decode_jpeg"></a>
Decode a JPEG-encoded image to a uint8 tensor.
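
For illustration, a minimal sketch pairing this op with `tf.read_file` (documented in io_ops.md); `"photo.jpg"` is a hypothetical path, any JPEG file will do:

```
import tensorflow as tf

contents = tf.read_file("photo.jpg")
image = tf.image.decode_jpeg(contents, channels=3)

with tf.Session() as sess:
    print(sess.run(image).shape)  # (height, width, 3), dtype uint8
```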
@@ -100,7 +100,7 @@ downscaling the image later.
- - -
-### tf.image.encode_jpeg(image, format=None, quality=None, progressive=None, optimize_size=None, chroma_downsampling=None, density_unit=None, x_density=None, y_density=None, xmp_metadata=None, name=None) <a class="md-anchor" id="encode_jpeg"></a>
+### `tf.image.encode_jpeg(image, format=None, quality=None, progressive=None, optimize_size=None, chroma_downsampling=None, density_unit=None, x_density=None, y_density=None, xmp_metadata=None, name=None)` <a class="md-anchor" id="encode_jpeg"></a>
JPEG-encode an image.
@@ -155,7 +155,7 @@ in function of the number of channels in `image`:
- - -
-### tf.image.decode_png(contents, channels=None, name=None) <a class="md-anchor" id="decode_png"></a>
+### `tf.image.decode_png(contents, channels=None, name=None)` <a class="md-anchor" id="decode_png"></a>
Decode a PNG-encoded image to a uint8 tensor.
@@ -187,7 +187,7 @@ of color channels.
- - -
-### tf.image.encode_png(image, compression=None, name=None) <a class="md-anchor" id="encode_png"></a>
+### `tf.image.encode_png(image, compression=None, name=None)` <a class="md-anchor" id="encode_png"></a>
PNG-encode an image.
@@ -244,7 +244,7 @@ images from the Queue.</i>
- - -
-### tf.image.resize_images(images, new_height, new_width, method=0) <a class="md-anchor" id="resize_images"></a>
+### `tf.image.resize_images(images, new_height, new_width, method=0)` <a class="md-anchor" id="resize_images"></a>
Resize `images` to `new_width`, `new_height` using the specified `method`.
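
A minimal sketch using a hypothetical batch of images with a statically known shape; `method=0`, the default in the signature above, selects bilinear interpolation:

```
import numpy as np
import tensorflow as tf

# A hypothetical batch of four 128x128 RGB images.
images = tf.constant(np.zeros((4, 128, 128, 3), dtype=np.float32))
resized = tf.image.resize_images(images, 64, 64)

with tf.Session() as sess:
    print(sess.run(resized).shape)  # (4, 64, 64, 3)
```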
@@ -289,7 +289,7 @@ the same as `new_width`, `new_height`. To avoid distortions see
- - -
-### tf.image.resize_area(images, size, name=None) <a class="md-anchor" id="resize_area"></a>
+### `tf.image.resize_area(images, size, name=None)` <a class="md-anchor" id="resize_area"></a>
Resize `images` to `size` using area interpolation.
@@ -312,7 +312,7 @@ Input images can be of different types but output images are always float.
- - -
-### tf.image.resize_bicubic(images, size, name=None) <a class="md-anchor" id="resize_bicubic"></a>
+### `tf.image.resize_bicubic(images, size, name=None)` <a class="md-anchor" id="resize_bicubic"></a>
Resize `images` to `size` using bicubic interpolation.
@@ -335,7 +335,7 @@ Input images can be of different types but output images are always float.
- - -
-### tf.image.resize_bilinear(images, size, name=None) <a class="md-anchor" id="resize_bilinear"></a>
+### `tf.image.resize_bilinear(images, size, name=None)` <a class="md-anchor" id="resize_bilinear"></a>
Resize `images` to `size` using bilinear interpolation.
@@ -358,7 +358,7 @@ Input images can be of different types but output images are always float.
- - -
-### tf.image.resize_nearest_neighbor(images, size, name=None) <a class="md-anchor" id="resize_nearest_neighbor"></a>
+### `tf.image.resize_nearest_neighbor(images, size, name=None)` <a class="md-anchor" id="resize_nearest_neighbor"></a>
Resize `images` to `size` using nearest neighbor interpolation.
@@ -385,7 +385,7 @@ Input images can be of different types but output images are always float.
- - -
-### tf.image.resize_image_with_crop_or_pad(image, target_height, target_width) <a class="md-anchor" id="resize_image_with_crop_or_pad"></a>
+### `tf.image.resize_image_with_crop_or_pad(image, target_height, target_width)` <a class="md-anchor" id="resize_image_with_crop_or_pad"></a>
Crops and/or pads an image to a target width and height.
@@ -419,7 +419,7 @@ dimension.
- - -
-### tf.image.pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width) <a class="md-anchor" id="pad_to_bounding_box"></a>
+### `tf.image.pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width)` <a class="md-anchor" id="pad_to_bounding_box"></a>
Pad `image` with zeros to the specified `height` and `width`.
@@ -452,7 +452,7 @@ This op does nothing if `offset_*` is zero and the image already has size
- - -
-### tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width) <a class="md-anchor" id="crop_to_bounding_box"></a>
+### `tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width)` <a class="md-anchor" id="crop_to_bounding_box"></a>
Crops an image to a specified bounding box.
@@ -485,7 +485,7 @@ lower-right corner is at
- - -
-### tf.image.random_crop(image, size, seed=None, name=None) <a class="md-anchor" id="random_crop"></a>
+### `tf.image.random_crop(image, size, seed=None, name=None)` <a class="md-anchor" id="random_crop"></a>
Randomly crops `image` to size `[target_height, target_width]`.
@@ -508,7 +508,7 @@ fully contains the result.
- - -
-### tf.image.extract_glimpse(input, size, offsets, centered=None, normalized=None, uniform_noise=None, name=None) <a class="md-anchor" id="extract_glimpse"></a>
+### `tf.image.extract_glimpse(input, size, offsets, centered=None, normalized=None, uniform_noise=None, name=None)` <a class="md-anchor" id="extract_glimpse"></a>
Extracts a glimpse from the input tensor.
@@ -564,7 +564,7 @@ The argument `normalized` and `centered` controls how the windows are built:
- - -
-### tf.image.flip_up_down(image) <a class="md-anchor" id="flip_up_down"></a>
+### `tf.image.flip_up_down(image)` <a class="md-anchor" id="flip_up_down"></a>
Flip an image vertically (upside down).
@@ -590,7 +590,7 @@ See also `reverse()`.
- - -
-### tf.image.random_flip_up_down(image, seed=None) <a class="md-anchor" id="random_flip_up_down"></a>
+### `tf.image.random_flip_up_down(image, seed=None)` <a class="md-anchor" id="random_flip_up_down"></a>
Randomly flips an image vertically (upside down).
@@ -617,7 +617,7 @@ dimension, which is `height`. Otherwise output the image as-is.
- - -
-### tf.image.flip_left_right(image) <a class="md-anchor" id="flip_left_right"></a>
+### `tf.image.flip_left_right(image)` <a class="md-anchor" id="flip_left_right"></a>
Flip an image horizontally (left to right).
@@ -643,7 +643,7 @@ See also `reverse()`.
- - -
-### tf.image.random_flip_left_right(image, seed=None) <a class="md-anchor" id="random_flip_left_right"></a>
+### `tf.image.random_flip_left_right(image, seed=None)` <a class="md-anchor" id="random_flip_left_right"></a>
Randomly flip an image horizontally (left to right).
@@ -670,7 +670,7 @@ second dimension, which is `width`. Otherwise output the image as-is.
- - -
-### tf.image.transpose_image(image) <a class="md-anchor" id="transpose_image"></a>
+### `tf.image.transpose_image(image)` <a class="md-anchor" id="transpose_image"></a>
Transpose an image by swapping the first and second dimension.
@@ -701,7 +701,7 @@ adjustments are often useful to expand a training set and reduce overfitting.
- - -
-### tf.image.adjust_brightness(image, delta, min_value=None, max_value=None) <a class="md-anchor" id="adjust_brightness"></a>
+### `tf.image.adjust_brightness(image, delta, min_value=None, max_value=None)` <a class="md-anchor" id="adjust_brightness"></a>
Adjust the brightness of RGB or Grayscale images.
@@ -728,7 +728,7 @@ maximum allowed values for `image.dtype` respectively.
- - -
-### tf.image.random_brightness(image, max_delta, seed=None) <a class="md-anchor" id="random_brightness"></a>
+### `tf.image.random_brightness(image, max_delta, seed=None)` <a class="md-anchor" id="random_brightness"></a>
Adjust the brightness of images by a random factor.
@@ -760,7 +760,7 @@ have modifications in the range `[-max_delta,max_delta]`.
- - -
-### tf.image.adjust_contrast(images, contrast_factor, min_value=None, max_value=None) <a class="md-anchor" id="adjust_contrast"></a>
+### `tf.image.adjust_contrast(images, contrast_factor, min_value=None, max_value=None)` <a class="md-anchor" id="adjust_contrast"></a>
Adjust contrast of RGB or grayscale images.
@@ -801,7 +801,7 @@ cast back to its original type after clipping.
- - -
-### tf.image.random_contrast(image, lower, upper, seed=None) <a class="md-anchor" id="random_contrast"></a>
+### `tf.image.random_contrast(image, lower, upper, seed=None)` <a class="md-anchor" id="random_contrast"></a>
Adjust the contrast of an image by a random factor.
@@ -830,7 +830,7 @@ picked in the interval `[lower, upper]`.
- - -
-### tf.image.per_image_whitening(image) <a class="md-anchor" id="per_image_whitening"></a>
+### `tf.image.per_image_whitening(image)` <a class="md-anchor" id="per_image_whitening"></a>
Linearly scales `image` to have zero mean and unit norm.
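
A minimal sketch on a hypothetical 32x32 RGB image with a statically known shape (the op subtracts the per-image mean and divides by the adjusted standard deviation):

```
import numpy as np
import tensorflow as tf

image = tf.constant(np.random.rand(32, 32, 3).astype(np.float32))
whitened = tf.image.per_image_whitening(image)

with tf.Session() as sess:
    out = sess.run(whitened)
    print(out.mean())  # approximately 0
```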
diff --git a/tensorflow/g3doc/api_docs/python/io_ops.md b/tensorflow/g3doc/api_docs/python/io_ops.md
index 5fb838d925..3fbcf3634b 100644
--- a/tensorflow/g3doc/api_docs/python/io_ops.md
+++ b/tensorflow/g3doc/api_docs/python/io_ops.md
@@ -9,7 +9,7 @@ accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
## Contents
### [Inputs and Readers](#AUTOGENERATED-inputs-and-readers)
* [Placeholders](#AUTOGENERATED-placeholders)
- * [tf.placeholder(dtype, shape=None, name=None)](#placeholder)
+ * [`tf.placeholder(dtype, shape=None, name=None)`](#placeholder)
* [Readers](#AUTOGENERATED-readers)
* [class tf.ReaderBase](#ReaderBase)
* [class tf.TextLineReader](#TextLineReader)
@@ -18,30 +18,30 @@ accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
* [class tf.TFRecordReader](#TFRecordReader)
* [class tf.FixedLengthRecordReader](#FixedLengthRecordReader)
* [Converting](#AUTOGENERATED-converting)
- * [tf.decode_csv(records, record_defaults, field_delim=None, name=None)](#decode_csv)
- * [tf.decode_raw(bytes, out_type, little_endian=None, name=None)](#decode_raw)
+ * [`tf.decode_csv(records, record_defaults, field_delim=None, name=None)`](#decode_csv)
+ * [`tf.decode_raw(bytes, out_type, little_endian=None, name=None)`](#decode_raw)
* [Example protocol buffer](#AUTOGENERATED-example-protocol-buffer)
- * [tf.parse_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseExample')](#parse_example)
- * [tf.parse_single_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseSingleExample')](#parse_single_example)
+ * [`tf.parse_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseExample')`](#parse_example)
+ * [`tf.parse_single_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseSingleExample')`](#parse_single_example)
* [Queues](#AUTOGENERATED-queues)
* [class tf.QueueBase](#QueueBase)
* [class tf.FIFOQueue](#FIFOQueue)
* [class tf.RandomShuffleQueue](#RandomShuffleQueue)
* [Dealing with the filesystem](#AUTOGENERATED-dealing-with-the-filesystem)
- * [tf.matching_files(pattern, name=None)](#matching_files)
- * [tf.read_file(filename, name=None)](#read_file)
+ * [`tf.matching_files(pattern, name=None)`](#matching_files)
+ * [`tf.read_file(filename, name=None)`](#read_file)
* [Input pipeline](#AUTOGENERATED-input-pipeline)
* [Beginning of an input pipeline](#AUTOGENERATED-beginning-of-an-input-pipeline)
- * [tf.train.match_filenames_once(pattern, name=None)](#match_filenames_once)
- * [tf.train.limit_epochs(tensor, num_epochs=None, name=None)](#limit_epochs)
- * [tf.train.range_input_producer(limit, num_epochs=None, shuffle=True, seed=None, capacity=32, name=None)](#range_input_producer)
- * [tf.train.slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None, capacity=32, name=None)](#slice_input_producer)
- * [tf.train.string_input_producer(string_tensor, num_epochs=None, shuffle=True, seed=None, capacity=32, name=None)](#string_input_producer)
+ * [`tf.train.match_filenames_once(pattern, name=None)`](#match_filenames_once)
+ * [`tf.train.limit_epochs(tensor, num_epochs=None, name=None)`](#limit_epochs)
+ * [`tf.train.range_input_producer(limit, num_epochs=None, shuffle=True, seed=None, capacity=32, name=None)`](#range_input_producer)
+ * [`tf.train.slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None, capacity=32, name=None)`](#slice_input_producer)
+ * [`tf.train.string_input_producer(string_tensor, num_epochs=None, shuffle=True, seed=None, capacity=32, name=None)`](#string_input_producer)
* [Batching at the end of an input pipeline](#AUTOGENERATED-batching-at-the-end-of-an-input-pipeline)
- * [tf.train.batch(tensor_list, batch_size, num_threads=1, capacity=32, enqueue_many=False, shapes=None, name=None)](#batch)
- * [tf.train.batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False, shapes=None, name=None)](#batch_join)
- * [tf.train.shuffle_batch(tensor_list, batch_size, capacity, min_after_dequeue, num_threads=1, seed=None, enqueue_many=False, shapes=None, name=None)](#shuffle_batch)
- * [tf.train.shuffle_batch_join(tensor_list_list, batch_size, capacity, min_after_dequeue, seed=None, enqueue_many=False, shapes=None, name=None)](#shuffle_batch_join)
+ * [`tf.train.batch(tensor_list, batch_size, num_threads=1, capacity=32, enqueue_many=False, shapes=None, name=None)`](#batch)
+ * [`tf.train.batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False, shapes=None, name=None)`](#batch_join)
+ * [`tf.train.shuffle_batch(tensor_list, batch_size, capacity, min_after_dequeue, num_threads=1, seed=None, enqueue_many=False, shapes=None, name=None)`](#shuffle_batch)
+ * [`tf.train.shuffle_batch_join(tensor_list_list, batch_size, capacity, min_after_dequeue, seed=None, enqueue_many=False, shapes=None, name=None)`](#shuffle_batch_join)
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
@@ -54,7 +54,7 @@ data](../../how_tos/reading_data/index.md#feeding).
- - -
-### tf.placeholder(dtype, shape=None, name=None) <a class="md-anchor" id="placeholder"></a>
+### `tf.placeholder(dtype, shape=None, name=None)` <a class="md-anchor" id="placeholder"></a>
Inserts a placeholder for a tensor that will always be fed.
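
A minimal sketch: the placeholder has no value of its own and must be supplied through `feed_dict` at run time (the shape and values here are arbitrary):

```
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 3], name="x")
y = tf.reduce_sum(x)

with tf.Session() as sess:
    # Fetching y without feeding x would fail.
    print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0]]}))  # 6.0
```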
@@ -114,7 +114,7 @@ it is asked to produce a record (via Read()) but it has finished the
last work unit.
- - -
-#### tf.ReaderBase.__init__(reader_ref, supports_serialize=False) <a class="md-anchor" id="ReaderBase.__init__"></a>
+#### `tf.ReaderBase.__init__(reader_ref, supports_serialize=False)` <a class="md-anchor" id="ReaderBase.__init__"></a>
Creates a new ReaderBase.
@@ -128,7 +128,7 @@ Creates a new ReaderBase.
- - -
-#### tf.ReaderBase.num_records_produced(name=None) <a class="md-anchor" id="ReaderBase.num_records_produced"></a>
+#### `tf.ReaderBase.num_records_produced(name=None)` <a class="md-anchor" id="ReaderBase.num_records_produced"></a>
Returns the number of records this reader has produced.
@@ -147,7 +147,7 @@ succeeded.
- - -
-#### tf.ReaderBase.num_work_units_completed(name=None) <a class="md-anchor" id="ReaderBase.num_work_units_completed"></a>
+#### `tf.ReaderBase.num_work_units_completed(name=None)` <a class="md-anchor" id="ReaderBase.num_work_units_completed"></a>
Returns the number of work units this reader has finished processing.
@@ -163,7 +163,7 @@ Returns the number of work units this reader has finished processing.
- - -
-#### tf.ReaderBase.read(queue, name=None) <a class="md-anchor" id="ReaderBase.read"></a>
+#### `tf.ReaderBase.read(queue, name=None)` <a class="md-anchor" id="ReaderBase.read"></a>
Returns the next record (key, value pair) produced by a reader.
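
For illustration, a minimal reader pipeline in the style of the reading-data how-to referenced by these docs; `"file0.csv"` and `"file1.csv"` are hypothetical files on disk:

```
import tensorflow as tf

filename_queue = tf.train.string_input_producer(["file0.csv", "file1.csv"])
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # Each run returns one (filename:line-number, line-contents) pair.
    print(sess.run([key, value]))
    coord.request_stop()
    coord.join(threads)
```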
@@ -188,13 +188,13 @@ finished with the previous file).
- - -
-#### tf.ReaderBase.reader_ref <a class="md-anchor" id="ReaderBase.reader_ref"></a>
+#### `tf.ReaderBase.reader_ref` <a class="md-anchor" id="ReaderBase.reader_ref"></a>
Op that implements the reader.
- - -
-#### tf.ReaderBase.reset(name=None) <a class="md-anchor" id="ReaderBase.reset"></a>
+#### `tf.ReaderBase.reset(name=None)` <a class="md-anchor" id="ReaderBase.reset"></a>
Restore a reader to its initial clean state.
@@ -210,7 +210,7 @@ Restore a reader to its initial clean state.
- - -
-#### tf.ReaderBase.restore_state(state, name=None) <a class="md-anchor" id="ReaderBase.restore_state"></a>
+#### `tf.ReaderBase.restore_state(state, name=None)` <a class="md-anchor" id="ReaderBase.restore_state"></a>
Restore a reader to a previously saved state.
@@ -231,7 +231,7 @@ Unimplemented error.
- - -
-#### tf.ReaderBase.serialize_state(name=None) <a class="md-anchor" id="ReaderBase.serialize_state"></a>
+#### `tf.ReaderBase.serialize_state(name=None)` <a class="md-anchor" id="ReaderBase.serialize_state"></a>
Produce a string tensor that encodes the state of a reader.
@@ -250,7 +250,7 @@ Unimplemented error.
- - -
-#### tf.ReaderBase.supports_serialize <a class="md-anchor" id="ReaderBase.supports_serialize"></a>
+#### `tf.ReaderBase.supports_serialize` <a class="md-anchor" id="ReaderBase.supports_serialize"></a>
Whether the Reader implementation can serialize its state.
@@ -265,7 +265,7 @@ Newlines are stripped from the output.
See ReaderBase for supported methods.
- - -
-#### tf.TextLineReader.__init__(skip_header_lines=None, name=None) <a class="md-anchor" id="TextLineReader.__init__"></a>
+#### `tf.TextLineReader.__init__(skip_header_lines=None, name=None)` <a class="md-anchor" id="TextLineReader.__init__"></a>
Create a TextLineReader.
@@ -279,7 +279,7 @@ Create a TextLineReader.
- - -
-#### tf.TextLineReader.num_records_produced(name=None) <a class="md-anchor" id="TextLineReader.num_records_produced"></a>
+#### `tf.TextLineReader.num_records_produced(name=None)` <a class="md-anchor" id="TextLineReader.num_records_produced"></a>
Returns the number of records this reader has produced.
@@ -298,7 +298,7 @@ succeeded.
- - -
-#### tf.TextLineReader.num_work_units_completed(name=None) <a class="md-anchor" id="TextLineReader.num_work_units_completed"></a>
+#### `tf.TextLineReader.num_work_units_completed(name=None)` <a class="md-anchor" id="TextLineReader.num_work_units_completed"></a>
Returns the number of work units this reader has finished processing.
@@ -314,7 +314,7 @@ Returns the number of work units this reader has finished processing.
- - -
-#### tf.TextLineReader.read(queue, name=None) <a class="md-anchor" id="TextLineReader.read"></a>
+#### `tf.TextLineReader.read(queue, name=None)` <a class="md-anchor" id="TextLineReader.read"></a>
Returns the next record (key, value pair) produced by a reader.
@@ -339,13 +339,13 @@ finished with the previous file).
- - -
-#### tf.TextLineReader.reader_ref <a class="md-anchor" id="TextLineReader.reader_ref"></a>
+#### `tf.TextLineReader.reader_ref` <a class="md-anchor" id="TextLineReader.reader_ref"></a>
Op that implements the reader.
- - -
-#### tf.TextLineReader.reset(name=None) <a class="md-anchor" id="TextLineReader.reset"></a>
+#### `tf.TextLineReader.reset(name=None)` <a class="md-anchor" id="TextLineReader.reset"></a>
Restore a reader to its initial clean state.
@@ -361,7 +361,7 @@ Restore a reader to its initial clean state.
- - -
-#### tf.TextLineReader.restore_state(state, name=None) <a class="md-anchor" id="TextLineReader.restore_state"></a>
+#### `tf.TextLineReader.restore_state(state, name=None)` <a class="md-anchor" id="TextLineReader.restore_state"></a>
Restore a reader to a previously saved state.
@@ -382,7 +382,7 @@ Unimplemented error.
- - -
-#### tf.TextLineReader.serialize_state(name=None) <a class="md-anchor" id="TextLineReader.serialize_state"></a>
+#### `tf.TextLineReader.serialize_state(name=None)` <a class="md-anchor" id="TextLineReader.serialize_state"></a>
Produce a string tensor that encodes the state of a reader.
@@ -401,7 +401,7 @@ Unimplemented error.
- - -
-#### tf.TextLineReader.supports_serialize <a class="md-anchor" id="TextLineReader.supports_serialize"></a>
+#### `tf.TextLineReader.supports_serialize` <a class="md-anchor" id="TextLineReader.supports_serialize"></a>
Whether the Reader implementation can serialize its state.
@@ -418,7 +418,7 @@ be a filename (key) and the contents of that file (value).
See ReaderBase for supported methods.
- - -
-#### tf.WholeFileReader.__init__(name=None) <a class="md-anchor" id="WholeFileReader.__init__"></a>
+#### `tf.WholeFileReader.__init__(name=None)` <a class="md-anchor" id="WholeFileReader.__init__"></a>
Create a WholeFileReader.
@@ -430,7 +430,7 @@ Create a WholeFileReader.
- - -
-#### tf.WholeFileReader.num_records_produced(name=None) <a class="md-anchor" id="WholeFileReader.num_records_produced"></a>
+#### `tf.WholeFileReader.num_records_produced(name=None)` <a class="md-anchor" id="WholeFileReader.num_records_produced"></a>
Returns the number of records this reader has produced.
@@ -449,7 +449,7 @@ succeeded.
- - -
-#### tf.WholeFileReader.num_work_units_completed(name=None) <a class="md-anchor" id="WholeFileReader.num_work_units_completed"></a>
+#### `tf.WholeFileReader.num_work_units_completed(name=None)` <a class="md-anchor" id="WholeFileReader.num_work_units_completed"></a>
Returns the number of work units this reader has finished processing.
@@ -465,7 +465,7 @@ Returns the number of work units this reader has finished processing.
- - -
-#### tf.WholeFileReader.read(queue, name=None) <a class="md-anchor" id="WholeFileReader.read"></a>
+#### `tf.WholeFileReader.read(queue, name=None)` <a class="md-anchor" id="WholeFileReader.read"></a>
Returns the next record (key, value pair) produced by a reader.
@@ -490,13 +490,13 @@ finished with the previous file).
- - -
-#### tf.WholeFileReader.reader_ref <a class="md-anchor" id="WholeFileReader.reader_ref"></a>
+#### `tf.WholeFileReader.reader_ref` <a class="md-anchor" id="WholeFileReader.reader_ref"></a>
Op that implements the reader.
- - -
-#### tf.WholeFileReader.reset(name=None) <a class="md-anchor" id="WholeFileReader.reset"></a>
+#### `tf.WholeFileReader.reset(name=None)` <a class="md-anchor" id="WholeFileReader.reset"></a>
Restore a reader to its initial clean state.
@@ -512,7 +512,7 @@ Restore a reader to its initial clean state.
- - -
-#### tf.WholeFileReader.restore_state(state, name=None) <a class="md-anchor" id="WholeFileReader.restore_state"></a>
+#### `tf.WholeFileReader.restore_state(state, name=None)` <a class="md-anchor" id="WholeFileReader.restore_state"></a>
Restore a reader to a previously saved state.
@@ -533,7 +533,7 @@ Unimplemented error.
- - -
-#### tf.WholeFileReader.serialize_state(name=None) <a class="md-anchor" id="WholeFileReader.serialize_state"></a>
+#### `tf.WholeFileReader.serialize_state(name=None)` <a class="md-anchor" id="WholeFileReader.serialize_state"></a>
Produce a string tensor that encodes the state of a reader.
@@ -552,7 +552,7 @@ Unimplemented error.
- - -
-#### tf.WholeFileReader.supports_serialize <a class="md-anchor" id="WholeFileReader.supports_serialize"></a>
+#### `tf.WholeFileReader.supports_serialize` <a class="md-anchor" id="WholeFileReader.supports_serialize"></a>
Whether the Reader implementation can serialize its state.
@@ -569,7 +569,7 @@ work string and output (work, work).
See ReaderBase for supported methods.
- - -
-#### tf.IdentityReader.__init__(name=None) <a class="md-anchor" id="IdentityReader.__init__"></a>
+#### `tf.IdentityReader.__init__(name=None)` <a class="md-anchor" id="IdentityReader.__init__"></a>
Create a IdentityReader.
Create an IdentityReader.
- - -
-#### tf.IdentityReader.num_records_produced(name=None) <a class="md-anchor" id="IdentityReader.num_records_produced"></a>
+#### `tf.IdentityReader.num_records_produced(name=None)` <a class="md-anchor" id="IdentityReader.num_records_produced"></a>
Returns the number of records this reader has produced.
@@ -600,7 +600,7 @@ succeeded.
- - -
-#### tf.IdentityReader.num_work_units_completed(name=None) <a class="md-anchor" id="IdentityReader.num_work_units_completed"></a>
+#### `tf.IdentityReader.num_work_units_completed(name=None)` <a class="md-anchor" id="IdentityReader.num_work_units_completed"></a>
Returns the number of work units this reader has finished processing.
@@ -616,7 +616,7 @@ Returns the number of work units this reader has finished processing.
- - -
-#### tf.IdentityReader.read(queue, name=None) <a class="md-anchor" id="IdentityReader.read"></a>
+#### `tf.IdentityReader.read(queue, name=None)` <a class="md-anchor" id="IdentityReader.read"></a>
Returns the next record (key, value pair) produced by a reader.
@@ -641,13 +641,13 @@ finished with the previous file).
- - -
-#### tf.IdentityReader.reader_ref <a class="md-anchor" id="IdentityReader.reader_ref"></a>
+#### `tf.IdentityReader.reader_ref` <a class="md-anchor" id="IdentityReader.reader_ref"></a>
Op that implements the reader.
- - -
-#### tf.IdentityReader.reset(name=None) <a class="md-anchor" id="IdentityReader.reset"></a>
+#### `tf.IdentityReader.reset(name=None)` <a class="md-anchor" id="IdentityReader.reset"></a>
Restore a reader to its initial clean state.
@@ -663,7 +663,7 @@ Restore a reader to its initial clean state.
- - -
-#### tf.IdentityReader.restore_state(state, name=None) <a class="md-anchor" id="IdentityReader.restore_state"></a>
+#### `tf.IdentityReader.restore_state(state, name=None)` <a class="md-anchor" id="IdentityReader.restore_state"></a>
Restore a reader to a previously saved state.
@@ -684,7 +684,7 @@ Unimplemented error.
- - -
-#### tf.IdentityReader.serialize_state(name=None) <a class="md-anchor" id="IdentityReader.serialize_state"></a>
+#### `tf.IdentityReader.serialize_state(name=None)` <a class="md-anchor" id="IdentityReader.serialize_state"></a>
Produce a string tensor that encodes the state of a reader.
@@ -703,7 +703,7 @@ Unimplemented error.
- - -
-#### tf.IdentityReader.supports_serialize <a class="md-anchor" id="IdentityReader.supports_serialize"></a>
+#### `tf.IdentityReader.supports_serialize` <a class="md-anchor" id="IdentityReader.supports_serialize"></a>
Whether the Reader implementation can serialize its state.
@@ -717,7 +717,7 @@ A Reader that outputs the records from a TFRecords file.
See ReaderBase for supported methods.
- - -
-#### tf.TFRecordReader.__init__(name=None) <a class="md-anchor" id="TFRecordReader.__init__"></a>
+#### `tf.TFRecordReader.__init__(name=None)` <a class="md-anchor" id="TFRecordReader.__init__"></a>
Create a TFRecordReader.
@@ -729,7 +729,7 @@ Create a TFRecordReader.
- - -
-#### tf.TFRecordReader.num_records_produced(name=None) <a class="md-anchor" id="TFRecordReader.num_records_produced"></a>
+#### `tf.TFRecordReader.num_records_produced(name=None)` <a class="md-anchor" id="TFRecordReader.num_records_produced"></a>
Returns the number of records this reader has produced.
@@ -748,7 +748,7 @@ succeeded.
- - -
-#### tf.TFRecordReader.num_work_units_completed(name=None) <a class="md-anchor" id="TFRecordReader.num_work_units_completed"></a>
+#### `tf.TFRecordReader.num_work_units_completed(name=None)` <a class="md-anchor" id="TFRecordReader.num_work_units_completed"></a>
Returns the number of work units this reader has finished processing.
@@ -764,7 +764,7 @@ Returns the number of work units this reader has finished processing.
- - -
-#### tf.TFRecordReader.read(queue, name=None) <a class="md-anchor" id="TFRecordReader.read"></a>
+#### `tf.TFRecordReader.read(queue, name=None)` <a class="md-anchor" id="TFRecordReader.read"></a>
Returns the next record (key, value pair) produced by a reader.
@@ -789,13 +789,13 @@ finished with the previous file).
- - -
-#### tf.TFRecordReader.reader_ref <a class="md-anchor" id="TFRecordReader.reader_ref"></a>
+#### `tf.TFRecordReader.reader_ref` <a class="md-anchor" id="TFRecordReader.reader_ref"></a>
Op that implements the reader.
- - -
-#### tf.TFRecordReader.reset(name=None) <a class="md-anchor" id="TFRecordReader.reset"></a>
+#### `tf.TFRecordReader.reset(name=None)` <a class="md-anchor" id="TFRecordReader.reset"></a>
Restore a reader to its initial clean state.
@@ -811,7 +811,7 @@ Restore a reader to its initial clean state.
- - -
-#### tf.TFRecordReader.restore_state(state, name=None) <a class="md-anchor" id="TFRecordReader.restore_state"></a>
+#### `tf.TFRecordReader.restore_state(state, name=None)` <a class="md-anchor" id="TFRecordReader.restore_state"></a>
Restore a reader to a previously saved state.
@@ -832,7 +832,7 @@ Unimplemented error.
- - -
-#### tf.TFRecordReader.serialize_state(name=None) <a class="md-anchor" id="TFRecordReader.serialize_state"></a>
+#### `tf.TFRecordReader.serialize_state(name=None)` <a class="md-anchor" id="TFRecordReader.serialize_state"></a>
Produce a string tensor that encodes the state of a reader.
@@ -851,7 +851,7 @@ Unimplemented error.
- - -
-#### tf.TFRecordReader.supports_serialize <a class="md-anchor" id="TFRecordReader.supports_serialize"></a>
+#### `tf.TFRecordReader.supports_serialize` <a class="md-anchor" id="TFRecordReader.supports_serialize"></a>
Whether the Reader implementation can serialize its state.
@@ -865,7 +865,7 @@ A Reader that outputs fixed-length records from a file.
See ReaderBase for supported methods.
- - -
-#### tf.FixedLengthRecordReader.__init__(record_bytes, header_bytes=None, footer_bytes=None, name=None) <a class="md-anchor" id="FixedLengthRecordReader.__init__"></a>
+#### `tf.FixedLengthRecordReader.__init__(record_bytes, header_bytes=None, footer_bytes=None, name=None)` <a class="md-anchor" id="FixedLengthRecordReader.__init__"></a>
Create a FixedLengthRecordReader.
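
As a hedged sketch (the file name and the 1-byte-label plus 32x32x3-image record layout are assumptions in the style of the CIFAR-10 data, not part of this docstring):

```python
import tensorflow as tf

# Assumed: "cifar_batch.bin" holds fixed 3073-byte records
# (1 label byte followed by a 32x32x3 image, as in CIFAR-10).
filename_queue = tf.train.string_input_producer(["cifar_batch.bin"])
reader = tf.FixedLengthRecordReader(record_bytes=1 + 32 * 32 * 3)
key, value = reader.read(filename_queue)

record = tf.decode_raw(value, tf.uint8)  # 3073 uint8 values per record
label = tf.slice(record, [0], [1])       # the leading label byte
image = tf.reshape(tf.slice(record, [1], [32 * 32 * 3]), [3, 32, 32])
```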
@@ -880,7 +880,7 @@ Create a FixedLengthRecordReader.
- - -
-#### tf.FixedLengthRecordReader.num_records_produced(name=None) <a class="md-anchor" id="FixedLengthRecordReader.num_records_produced"></a>
+#### `tf.FixedLengthRecordReader.num_records_produced(name=None)` <a class="md-anchor" id="FixedLengthRecordReader.num_records_produced"></a>
Returns the number of records this reader has produced.
@@ -899,7 +899,7 @@ succeeded.
- - -
-#### tf.FixedLengthRecordReader.num_work_units_completed(name=None) <a class="md-anchor" id="FixedLengthRecordReader.num_work_units_completed"></a>
+#### `tf.FixedLengthRecordReader.num_work_units_completed(name=None)` <a class="md-anchor" id="FixedLengthRecordReader.num_work_units_completed"></a>
Returns the number of work units this reader has finished processing.
@@ -915,7 +915,7 @@ Returns the number of work units this reader has finished processing.
- - -
-#### tf.FixedLengthRecordReader.read(queue, name=None) <a class="md-anchor" id="FixedLengthRecordReader.read"></a>
+#### `tf.FixedLengthRecordReader.read(queue, name=None)` <a class="md-anchor" id="FixedLengthRecordReader.read"></a>
Returns the next record (key, value pair) produced by a reader.
@@ -940,13 +940,13 @@ finished with the previous file).
- - -
-#### tf.FixedLengthRecordReader.reader_ref <a class="md-anchor" id="FixedLengthRecordReader.reader_ref"></a>
+#### `tf.FixedLengthRecordReader.reader_ref` <a class="md-anchor" id="FixedLengthRecordReader.reader_ref"></a>
Op that implements the reader.
- - -
-#### tf.FixedLengthRecordReader.reset(name=None) <a class="md-anchor" id="FixedLengthRecordReader.reset"></a>
+#### `tf.FixedLengthRecordReader.reset(name=None)` <a class="md-anchor" id="FixedLengthRecordReader.reset"></a>
Restore a reader to its initial clean state.
@@ -962,7 +962,7 @@ Restore a reader to its initial clean state.
- - -
-#### tf.FixedLengthRecordReader.restore_state(state, name=None) <a class="md-anchor" id="FixedLengthRecordReader.restore_state"></a>
+#### `tf.FixedLengthRecordReader.restore_state(state, name=None)` <a class="md-anchor" id="FixedLengthRecordReader.restore_state"></a>
Restore a reader to a previously saved state.
@@ -983,7 +983,7 @@ Unimplemented error.
- - -
-#### tf.FixedLengthRecordReader.serialize_state(name=None) <a class="md-anchor" id="FixedLengthRecordReader.serialize_state"></a>
+#### `tf.FixedLengthRecordReader.serialize_state(name=None)` <a class="md-anchor" id="FixedLengthRecordReader.serialize_state"></a>
Produce a string tensor that encodes the state of a reader.
@@ -1002,7 +1002,7 @@ Unimplemented error.
- - -
-#### tf.FixedLengthRecordReader.supports_serialize <a class="md-anchor" id="FixedLengthRecordReader.supports_serialize"></a>
+#### `tf.FixedLengthRecordReader.supports_serialize` <a class="md-anchor" id="FixedLengthRecordReader.supports_serialize"></a>
Whether the Reader implementation can serialize its state.
@@ -1015,7 +1015,7 @@ formats into tensors.
- - -
-### tf.decode_csv(records, record_defaults, field_delim=None, name=None) <a class="md-anchor" id="decode_csv"></a>
+### `tf.decode_csv(records, record_defaults, field_delim=None, name=None)` <a class="md-anchor" id="decode_csv"></a>
Convert CSV records to tensors. Each column maps to one tensor.
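
For instance, a minimal sketch (the three-column line and the defaults below are made up for illustration):

```python
import tensorflow as tf

# record_defaults fixes each column's type and supplies a value for missing fields.
line = tf.constant("1,2.5,hello")
col1, col2, col3 = tf.decode_csv(line, record_defaults=[[0], [0.0], [""]])

with tf.Session() as sess:
  print(sess.run([col1, col2, col3]))  # => roughly [1, 2.5, 'hello']
```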
@@ -1044,7 +1044,7 @@ Note that we allow leading and trailing spaces with int or float field.
- - -
-### tf.decode_raw(bytes, out_type, little_endian=None, name=None) <a class="md-anchor" id="decode_raw"></a>
+### `tf.decode_raw(bytes, out_type, little_endian=None, name=None)` <a class="md-anchor" id="decode_raw"></a>
Reinterpret the bytes of a string as a vector of numbers.
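
A tiny sketch (the four-byte string is an arbitrary example):

```python
import tensorflow as tf

record = tf.constant("\x01\x02\x03\x04")   # four raw bytes
values = tf.decode_raw(record, tf.uint8)   # reinterpreted as a uint8 vector

with tf.Session() as sess:
  print(sess.run(values))  # => [1 2 3 4]
```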
@@ -1081,7 +1081,7 @@ here](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/ex
- - -
-### tf.parse_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseExample') <a class="md-anchor" id="parse_example"></a>
+### `tf.parse_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseExample')` <a class="md-anchor" id="parse_example"></a>
Parses `Example` protos.
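
A hedged sketch of the calling convention (the feature name `age` and the toy proto are placeholders made up for illustration):

```python
import tensorflow as tf

# Assumed toy input: one serialized Example proto with an int64 feature "age".
example = tf.train.Example(features=tf.train.Features(feature={
    "age": tf.train.Feature(int64_list=tf.train.Int64List(value=[42]))}))
serialized = tf.constant([example.SerializeToString()])

features = tf.parse_example(
    serialized, dense_keys=["age"], dense_types=[tf.int64])

with tf.Session() as sess:
  print(sess.run(features["age"]))  # one "age" value per example in the batch
```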
@@ -1255,7 +1255,7 @@ And the expected output is:
- - -
-### tf.parse_single_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseSingleExample') <a class="md-anchor" id="parse_single_example"></a>
+### `tf.parse_single_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseSingleExample')` <a class="md-anchor" id="parse_single_example"></a>
Parses a single `Example` proto.
@@ -1329,7 +1329,7 @@ them.
- - -
-#### tf.QueueBase.enqueue(vals, name=None) <a class="md-anchor" id="QueueBase.enqueue"></a>
+#### `tf.QueueBase.enqueue(vals, name=None)` <a class="md-anchor" id="QueueBase.enqueue"></a>
Enqueues one element to this queue.
@@ -1349,7 +1349,7 @@ until the element has been enqueued.
- - -
-#### tf.QueueBase.enqueue_many(vals, name=None) <a class="md-anchor" id="QueueBase.enqueue_many"></a>
+#### `tf.QueueBase.enqueue_many(vals, name=None)` <a class="md-anchor" id="QueueBase.enqueue_many"></a>
Enqueues zero or more elements to this queue.
@@ -1375,7 +1375,7 @@ until all of the elements have been enqueued.
- - -
-#### tf.QueueBase.dequeue(name=None) <a class="md-anchor" id="QueueBase.dequeue"></a>
+#### `tf.QueueBase.dequeue(name=None)` <a class="md-anchor" id="QueueBase.dequeue"></a>
Dequeues one element from this queue.
@@ -1394,7 +1394,7 @@ until there is an element to dequeue.
- - -
-#### tf.QueueBase.dequeue_many(n, name=None) <a class="md-anchor" id="QueueBase.dequeue_many"></a>
+#### `tf.QueueBase.dequeue_many(n, name=None)` <a class="md-anchor" id="QueueBase.dequeue_many"></a>
Dequeues and concatenates `n` elements from this queue.
@@ -1419,7 +1419,7 @@ executes, it will block until `n` elements have been dequeued.
- - -
-#### tf.QueueBase.size(name=None) <a class="md-anchor" id="QueueBase.size"></a>
+#### `tf.QueueBase.size(name=None)` <a class="md-anchor" id="QueueBase.size"></a>
Compute the number of elements in this queue.
@@ -1436,7 +1436,7 @@ Compute the number of elements in this queue.
- - -
-#### tf.QueueBase.close(cancel_pending_enqueues=False, name=None) <a class="md-anchor" id="QueueBase.close"></a>
+#### `tf.QueueBase.close(cancel_pending_enqueues=False, name=None)` <a class="md-anchor" id="QueueBase.close"></a>
Closes this queue.
@@ -1466,7 +1466,7 @@ be cancelled.
#### Other Methods <a class="md-anchor" id="AUTOGENERATED-other-methods"></a>
- - -
-#### tf.QueueBase.__init__(dtypes, shapes, queue_ref) <a class="md-anchor" id="QueueBase.__init__"></a>
+#### `tf.QueueBase.__init__(dtypes, shapes, queue_ref)` <a class="md-anchor" id="QueueBase.__init__"></a>
Constructs a queue object from a queue reference.
@@ -1484,19 +1484,19 @@ Constructs a queue object from a queue reference.
- - -
-#### tf.QueueBase.dtypes <a class="md-anchor" id="QueueBase.dtypes"></a>
+#### `tf.QueueBase.dtypes` <a class="md-anchor" id="QueueBase.dtypes"></a>
The list of dtypes for each component of a queue element.
- - -
-#### tf.QueueBase.name <a class="md-anchor" id="QueueBase.name"></a>
+#### `tf.QueueBase.name` <a class="md-anchor" id="QueueBase.name"></a>
The name of the underlying queue.
- - -
-#### tf.QueueBase.queue_ref <a class="md-anchor" id="QueueBase.queue_ref"></a>
+#### `tf.QueueBase.queue_ref` <a class="md-anchor" id="QueueBase.queue_ref"></a>
The underlying queue reference.
@@ -1512,7 +1512,7 @@ this class.
- - -
-#### tf.FIFOQueue.__init__(capacity, dtypes, shapes=None, shared_name=None, name='fifo_queue') <a class="md-anchor" id="FIFOQueue.__init__"></a>
+#### `tf.FIFOQueue.__init__(capacity, dtypes, shapes=None, shared_name=None, name='fifo_queue')` <a class="md-anchor" id="FIFOQueue.__init__"></a>
Creates a queue that dequeues elements in a first-in first-out order.
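
A minimal sketch of the queue methods documented above (the capacity and values are arbitrary):

```python
import tensorflow as tf

q = tf.FIFOQueue(capacity=3, dtypes=[tf.int32], shapes=[[]])
enqueue_op = q.enqueue([10])
enqueue_many_op = q.enqueue_many([[20, 30]])
dequeue_op = q.dequeue()

with tf.Session() as sess:
  sess.run(enqueue_op)
  sess.run(enqueue_many_op)
  print(sess.run(q.size()))    # => 3
  print(sess.run(dequeue_op))  # => 10, first in, first out
```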
@@ -1555,7 +1555,7 @@ this class.
- - -
-#### tf.RandomShuffleQueue.__init__(capacity, min_after_dequeue, dtypes, shapes=None, seed=None, shared_name=None, name='random_shuffle_queue') <a class="md-anchor" id="RandomShuffleQueue.__init__"></a>
+#### `tf.RandomShuffleQueue.__init__(capacity, min_after_dequeue, dtypes, shapes=None, seed=None, shared_name=None, name='random_shuffle_queue')` <a class="md-anchor" id="RandomShuffleQueue.__init__"></a>
Create a queue that dequeues elements in a random order.
@@ -1604,7 +1604,7 @@ queue has been closed.
- - -
-### tf.matching_files(pattern, name=None) <a class="md-anchor" id="matching_files"></a>
+### `tf.matching_files(pattern, name=None)` <a class="md-anchor" id="matching_files"></a>
Returns the set of files matching a pattern.
@@ -1624,7 +1624,7 @@ basename portion of the pattern, not in the directory portion.
- - -
-### tf.read_file(filename, name=None) <a class="md-anchor" id="read_file"></a>
+### `tf.read_file(filename, name=None)` <a class="md-anchor" id="read_file"></a>
Reads and outputs the entire contents of the input filename.
@@ -1653,7 +1653,7 @@ The "producer" functions add a queue to the graph and a corresponding
- - -
-### tf.train.match_filenames_once(pattern, name=None) <a class="md-anchor" id="match_filenames_once"></a>
+### `tf.train.match_filenames_once(pattern, name=None)` <a class="md-anchor" id="match_filenames_once"></a>
Save the list of files matching pattern, so it is only computed once.
@@ -1670,7 +1670,7 @@ Save the list of files matching pattern, so it is only computed once.
- - -
-### tf.train.limit_epochs(tensor, num_epochs=None, name=None) <a class="md-anchor" id="limit_epochs"></a>
+### `tf.train.limit_epochs(tensor, num_epochs=None, name=None)` <a class="md-anchor" id="limit_epochs"></a>
Returns tensor num_epochs times and then raises an OutOfRange error.
@@ -1689,7 +1689,7 @@ Returns tensor num_epochs times and then raises an OutOfRange error.
- - -
-### tf.train.range_input_producer(limit, num_epochs=None, shuffle=True, seed=None, capacity=32, name=None) <a class="md-anchor" id="range_input_producer"></a>
+### `tf.train.range_input_producer(limit, num_epochs=None, shuffle=True, seed=None, capacity=32, name=None)` <a class="md-anchor" id="range_input_producer"></a>
Produces the integers from 0 to limit-1 in a queue.
@@ -1715,7 +1715,7 @@ Produces the integers from 0 to limit-1 in a queue.
- - -
-### tf.train.slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None, capacity=32, name=None) <a class="md-anchor" id="slice_input_producer"></a>
+### `tf.train.slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None, capacity=32, name=None)` <a class="md-anchor" id="slice_input_producer"></a>
Produces a slice of each Tensor in tensor_list.
@@ -1744,7 +1744,7 @@ is added to the current Graph's QUEUE_RUNNER collection.
- - -
-### tf.train.string_input_producer(string_tensor, num_epochs=None, shuffle=True, seed=None, capacity=32, name=None) <a class="md-anchor" id="string_input_producer"></a>
+### `tf.train.string_input_producer(string_tensor, num_epochs=None, shuffle=True, seed=None, capacity=32, name=None)` <a class="md-anchor" id="string_input_producer"></a>
Output strings (e.g. filenames) to a queue for an input pipeline.
@@ -1791,70 +1791,70 @@ want them run by N threads.
- - -
-### tf.train.batch(tensor_list, batch_size, num_threads=1, capacity=32, enqueue_many=False, shapes=None, name=None) <a class="md-anchor" id="batch"></a>
+### `tf.train.batch(tensor_list, batch_size, num_threads=1, capacity=32, enqueue_many=False, shapes=None, name=None)` <a class="md-anchor" id="batch"></a>
-Run tensor_list to fill a queue to create batches.
+Creates batches of tensors in `tensor_list`.
-Implemented using a queue -- a QueueRunner for the queue
-is added to the current Graph's QUEUE_RUNNER collection.
+This function is implemented using a queue. A `QueueRunner` for the
+queue is added to the current `Graph`'s `QUEUE_RUNNER` collection.
+
+If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
+single example. An input tensor with shape `[x, y, z]` will be output
+as a tensor with shape `[batch_size, x, y, z]`.
+
+If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
+batch of examples, where the first dimension is indexed by example,
+and all members of `tensor_list` should have the same size in the
+first dimension. If an input tensor has shape `[*, x, y, z]`, the
+output will have shape `[batch_size, x, y, z]`. The `capacity` argument
+controls how long the prefetching is allowed to grow the queues.
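
For example, a hedged sketch with toy constant inputs (a real pipeline would feed tensors produced by a reader instead):

```python
import tensorflow as tf

# Assumed toy "example": one scalar label and one length-3 feature vector.
single_label = tf.constant(7)
single_feature = tf.constant([1.0, 2.0, 3.0])

# Groups 4 consecutive examples into one batch.
label_batch, feature_batch = tf.train.batch(
    [single_label, single_feature], batch_size=4)

with tf.Session() as sess:
  tf.train.start_queue_runners(sess=sess)
  print(sess.run([label_batch, feature_batch]))
  # label_batch has shape [4]; feature_batch has shape [4, 3].
```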
##### Args: <a class="md-anchor" id="AUTOGENERATED-args-"></a>
* <b>tensor_list</b>: The list of tensors to enqueue.
* <b>batch_size</b>: The new batch size pulled from the queue.
-* <b>num_threads</b>: The number of threads enqueuing tensor_list.
-* <b>capacity</b>: Maximum number of elements in the queue, controls the
- how far ahead the prefetching allowed is allowed to get and
- memory usage.
-* <b>enqueue_many</b>: If False, tensor_list is assumed to represent a
- single example. If True, tensor_list is assumed to represent
- a batch of examples, where the first dimension is indexed by
- example, and all members of tensor_list should have the same
- size in the first dimension.
-* <b>shapes</b>: Optional. The shapes for each example. Defaults to the
- inferred shapes for tensor_list (leaving off the first dimension
- if enqueue_many is True).
-* <b>name</b>: A name for the operations (optional).
+* <b>num_threads</b>: The number of threads enqueuing `tensor_list`.
+* <b>capacity</b>: An integer. The maximum number of elements in the queue.
+* <b>enqueue_many</b>: Whether each tensor in `tensor_list` is a single example.
+* <b>shapes</b>: (Optional) The shapes for each example. Defaults to the
+ inferred shapes for `tensor_list`.
+* <b>name</b>: (Optional) A name for the operations.
##### Returns: <a class="md-anchor" id="AUTOGENERATED-returns-"></a>
- A list of tensors with the same number and types as tensor_list.
- If enqueue_many is false, then an input tensor with shape
- `[x, y, z]` will be output as a tensor with shape
- `[batch_size, x, y, z]`. If enqueue_many is True, and an
- input tensor has shape `[*, x, y, z]`, the the output will have
- shape `[batch_size, x, y, z]`.
+ A list of tensors with the same number and types as `tensor_list`.
- - -
-### tf.train.batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False, shapes=None, name=None) <a class="md-anchor" id="batch_join"></a>
+### `tf.train.batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False, shapes=None, name=None)` <a class="md-anchor" id="batch_join"></a>
-Run a list of tensors to fill a queue to create batches of examples.
+Runs a list of tensors to fill a queue to create batches of examples.
Enqueues a different list of tensors in different threads.
Implemented using a queue -- a `QueueRunner` for the queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
-`len(tensor_list_list)` threads will be started, with thread `i` enqueuing
-the tensors from tensor_list[i]. `tensor_list[i1][j]` must match
-`tensor_list[i2][j]` in type and shape, except in the first dimension if
-`enqueue_many` is true.
+`len(tensor_list_list)` threads will be started,
+with thread `i` enqueuing the tensors from
+`tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
+`tensor_list_list[i2][j]` in type and shape, except in the first
+dimension if `enqueue_many` is true.
-If `enqueue_many` is false, each `tensor_list_list[i]` is assumed to
-represent a single example. Otherwise, `tensor_list_list[i]` is assumed to
-represent a batch of examples, where the first dimension is indexed by
-example, and all members of `tensor_list_list[i]` should have the same size
-in the first dimension.
+If `enqueue_many` is `False`, each `tensor_list_list[i]` is assumed
+to represent a single example. An input tensor `x` will be output as a
+tensor with shape `[batch_size] + x.shape`.
-If `enqueue_many` is false, then an input tensor `x` will be output as a
-tensor with shape `[batch_size] + x.shape`. If `enqueue_many` is true, the
-slices of any input tensor `x` are treated as examples, and the output tensors
-will have shape `[batch_size] + x.shape[1:]`.
+If `enqueue_many` is `True`, `tensor_list_list[i]` is assumed to
+represent a batch of examples, where the first dimension is indexed
+by example, and all members of `tensor_list_list[i]` should have the
+same size in the first dimension. The slices of any input tensor
+`x` are treated as examples, and the output tensors will have shape
+`[batch_size] + x.shape[1:]`.
-The `capacity` argument controls the how long the prefetching
-is allowed to grow the queues.
+The `capacity` argument controls how long the prefetching is allowed to
+grow the queues.
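
A hedged sketch with two toy sources (the constants stand in for per-reader tensors; one enqueuing thread is started per inner list):

```python
import tensorflow as tf

# Assumed toy sources: each inner list is one (label, feature) pair.
source_a = [tf.constant(0), tf.constant([1.0, 2.0])]
source_b = [tf.constant(1), tf.constant([3.0, 4.0])]

label_batch, feature_batch = tf.train.batch_join(
    [source_a, source_b], batch_size=4)

with tf.Session() as sess:
  tf.train.start_queue_runners(sess=sess)
  print(sess.run([label_batch, feature_batch]))  # batches mix both sources
```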
##### Args: <a class="md-anchor" id="AUTOGENERATED-args-"></a>
@@ -1866,7 +1866,7 @@ is allowed to grow the queues.
example.
* <b>shapes</b>: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
-* <b>name</b>: A name for the operations (optional).
+* <b>name</b>: (Optional) A name for the operations.
##### Returns: <a class="md-anchor" id="AUTOGENERATED-returns-"></a>
@@ -1876,96 +1876,111 @@ is allowed to grow the queues.
- - -
-### tf.train.shuffle_batch(tensor_list, batch_size, capacity, min_after_dequeue, num_threads=1, seed=None, enqueue_many=False, shapes=None, name=None) <a class="md-anchor" id="shuffle_batch"></a>
+### `tf.train.shuffle_batch(tensor_list, batch_size, capacity, min_after_dequeue, num_threads=1, seed=None, enqueue_many=False, shapes=None, name=None)` <a class="md-anchor" id="shuffle_batch"></a>
-Create batches by randomly shuffling tensors.
+Creates batches by randomly shuffling tensors.
+
+This function adds the following to the current `Graph`:
+
+* A shuffling queue into which tensors from `tensor_list` are enqueued.
+* A `dequeue_many` operation to create batches from the queue.
+* A `QueueRunner` to the `QUEUE_RUNNER` collection, to enqueue the tensors
+ from `tensor_list`.
+
+If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
+single example. An input tensor with shape `[x, y, z]` will be output
+as a tensor with shape `[batch_size, x, y, z]`.
+
+If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
+batch of examples, where the first dimension is indexed by example,
+and all members of `tensor_list` should have the same size in the
+first dimension. If an input tensor has shape `[*, x, y, z]`, the
+output will have shape `[batch_size, x, y, z]`.
-This adds:
+The `capacity` argument controls how long the prefetching is allowed to
+grow the queues.
-* a shuffling queue into which tensors from tensor_list are enqueued.
-* a dequeue many operation to create batches from the queue,
-* and a QueueRunner is added to the current Graph's QUEUE_RUNNER collection,
- to enqueue the tensors from tensor_list.
+For example:
+
+```python
+# Creates batches of 32 images and 32 labels.
+image_batch, label_batch = tf.train.shuffle_batch(
+ [single_image, single_label],
+ batch_size=32,
+ num_threads=4,
+ capacity=50000,
+ min_after_dequeue=10000)
+```
##### Args: <a class="md-anchor" id="AUTOGENERATED-args-"></a>
* <b>tensor_list</b>: The list of tensors to enqueue.
* <b>batch_size</b>: The new batch size pulled from the queue.
-* <b>capacity</b>: Maximum number of elements in the queue, controls the
- how far ahead the prefetching allowed is allowed to get and
- memory usage.
+* <b>capacity</b>: An integer. The maximum number of elements in the queue.
* <b>min_after_dequeue</b>: Minimum number of elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
-* <b>num_threads</b>: The number of threads enqueuing tensor_list.
+* <b>num_threads</b>: The number of threads enqueuing `tensor_list`.
* <b>seed</b>: Seed for the random shuffling within the queue.
-* <b>enqueue_many</b>: If False, tensor_list is assumed to represent a
- single example. If True, tensor_list is assumed to represent
- a batch of examples, where the first dimension is indexed by
- example, and all members of tensor_list should have the same
- size in the first dimension.
-* <b>shapes</b>: Optional. The shapes for each example. Defaults to the
- inferred shapes for tensor_list (leaving off the first dimension
- if enqueue_many is True).
-* <b>name</b>: A name for the operations (optional).
+* <b>enqueue_many</b>: Whether each tensor in `tensor_list` is a single example.
+* <b>shapes</b>: (Optional) The shapes for each example. Defaults to the
+ inferred shapes for `tensor_list`.
+* <b>name</b>: (Optional) A name for the operations.
##### Returns: <a class="md-anchor" id="AUTOGENERATED-returns-"></a>
- A list of tensors with the same number and types as tensor_list.
- If enqueue_many is false, then an input tensor with shape
- `[x, y, z]` will be output as a tensor with shape
- `[batch_size, x, y, z]`. If enqueue_many is True, and an
- input tensor has shape `[*, x, y, z]`, the the output will have
- shape `[batch_size, x, y, z]`.
+ A list of tensors with the same number and types as `tensor_list`.
- - -
-### tf.train.shuffle_batch_join(tensor_list_list, batch_size, capacity, min_after_dequeue, seed=None, enqueue_many=False, shapes=None, name=None) <a class="md-anchor" id="shuffle_batch_join"></a>
+### `tf.train.shuffle_batch_join(tensor_list_list, batch_size, capacity, min_after_dequeue, seed=None, enqueue_many=False, shapes=None, name=None)` <a class="md-anchor" id="shuffle_batch_join"></a>
Create batches by randomly shuffling tensors.
This version enqueues a different list of tensors in different threads.
-It adds:
+It adds the following to the current `Graph`:
+
+* A shuffling queue into which tensors from `tensor_list_list` are enqueued.
+* A `dequeue_many` operation to create batches from the queue.
+* A `QueueRunner` to the `QUEUE_RUNNER` collection, to enqueue the tensors
+ from `tensor_list_list`.
+
+`len(tensor_list_list)` threads will be started, with thread `i` enqueuing
+the tensors from `tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
+`tensor_list_list[i2][j]` in type and shape, except in the first dimension if
+`enqueue_many` is true.
+
+If `enqueue_many` is `False`, each `tensor_list_list[i]` is assumed
+to represent a single example. An input tensor with shape `[x, y,
+z]` will be output as a tensor with shape `[batch_size, x, y, z]`.
+
+If `enqueue_many` is `True`, `tensor_list_list[i]` is assumed to
+represent a batch of examples, where the first dimension is indexed
+by example, and all members of `tensor_list_list[i]` should have the
+same size in the first dimension. If an input tensor has shape `[*, x,
+y, z]`, the output will have shape `[batch_size, x, y, z]`.
-* a shuffling queue into which tensors from tensor_list_list are enqueued.
-* a dequeue many operation to create batches from the queue,
-* and a QueueRunner is added to the current Graph's QUEUE_RUNNER collection,
- to enqueue the tensors from tensor_list_list.
+The `capacity` argument controls how long the prefetching is allowed to
+grow the queues.
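
A hedged sketch with toy constant sources (real code would pass per-reader tensor lists; the `capacity` and `min_after_dequeue` values here are arbitrary):

```python
import tensorflow as tf

# Assumed toy sources: one inner list per enqueuing thread.
source_a = [tf.constant(0), tf.constant([1.0, 2.0])]
source_b = [tf.constant(1), tf.constant([3.0, 4.0])]

label_batch, feature_batch = tf.train.shuffle_batch_join(
    [source_a, source_b],
    batch_size=4,
    capacity=50,
    min_after_dequeue=10)

with tf.Session() as sess:
  tf.train.start_queue_runners(sess=sess)
  print(sess.run([label_batch, feature_batch]))  # examples arrive in shuffled order
```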
##### Args: <a class="md-anchor" id="AUTOGENERATED-args-"></a>
* <b>tensor_list_list</b>: A list of tuples of tensors to enqueue.
- len(tensor_list_list) threads will be started, with the i-th
- thread enqueuing the tensors from tensor_list[i].
- tensor_list[i1][j] must match tensor_list[i2][j] in type and
- shape (except in the first dimension if enqueue_many is true).
-* <b>batch_size</b>: The new batch size pulled from the queue.
-* <b>capacity</b>: Maximum number of elements in the queue, controls the
- how far ahead the prefetching allowed is allowed to get and
- memory usage.
+* <b>batch_size</b>: An integer. The new batch size pulled from the queue.
+* <b>capacity</b>: An integer. The maximum number of elements in the queue.
* <b>min_after_dequeue</b>: Minimum number of elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
* <b>seed</b>: Seed for the random shuffling within the queue.
-* <b>enqueue_many</b>: If `False`, each tensor_list_list[i] is assumed to
- represent a single example. If `True`, tensor_list_list[i] is
- assumed to represent a batch of examples, where the first
- dimension is indexed by example, and all members of
- tensor_list_list[i] should have the same size in the first
- dimension.
-* <b>shapes</b>: Optional. The shapes for each example. Defaults to the
- inferred shapes for `tensor_list_list[i]` (which must match, after
- leaving off the first dimension if enqueue_many is `True`).
-* <b>name</b>: A name for the operations (optional).
+* <b>enqueue_many</b>: Whether each tensor in `tensor_list_list` is a single
+ example.
+* <b>shapes</b>: (Optional) The shapes for each example. Defaults to the
+ inferred shapes for `tensor_list_list[i]`.
+* <b>name</b>: (Optional) A name for the operations.
##### Returns: <a class="md-anchor" id="AUTOGENERATED-returns-"></a>
- A list of tensors with the same number and types as
- tensor_list_list[i]. If enqueue_many is false, then an input
- tensor with shape `[x, y, z]` will be output as a tensor with
- shape `[batch_size, x, y, z]`. If enqueue_many is True, and an
- input tensor has shape `[*, x, y, z]`, the the output will have
- shape `[batch_size, x, y, z]`.
+ A list of tensors with the same number and types as `tensor_list_list[i]`.
diff --git a/tensorflow/g3doc/api_docs/python/math_ops.md b/tensorflow/g3doc/api_docs/python/math_ops.md
index 3ccf56443f..4273025543 100644
--- a/tensorflow/g3doc/api_docs/python/math_ops.md
+++ b/tensorflow/g3doc/api_docs/python/math_ops.md
@@ -9,73 +9,73 @@ accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
## Contents
### [Math](#AUTOGENERATED-math)
* [Arithmetic Operators](#AUTOGENERATED-arithmetic-operators)
- * [tf.add(x, y, name=None)](#add)
- * [tf.sub(x, y, name=None)](#sub)
- * [tf.mul(x, y, name=None)](#mul)
- * [tf.div(x, y, name=None)](#div)
- * [tf.mod(x, y, name=None)](#mod)
+ * [`tf.add(x, y, name=None)`](#add)
+ * [`tf.sub(x, y, name=None)`](#sub)
+ * [`tf.mul(x, y, name=None)`](#mul)
+ * [`tf.div(x, y, name=None)`](#div)
+ * [`tf.mod(x, y, name=None)`](#mod)
* [Basic Math Functions](#AUTOGENERATED-basic-math-functions)
- * [tf.add_n(inputs, name=None)](#add_n)
- * [tf.abs(x, name=None)](#abs)
- * [tf.neg(x, name=None)](#neg)
- * [tf.sign(x, name=None)](#sign)
- * [tf.inv(x, name=None)](#inv)
- * [tf.square(x, name=None)](#square)
- * [tf.round(x, name=None)](#round)
- * [tf.sqrt(x, name=None)](#sqrt)
- * [tf.rsqrt(x, name=None)](#rsqrt)
- * [tf.pow(x, y, name=None)](#pow)
- * [tf.exp(x, name=None)](#exp)
- * [tf.log(x, name=None)](#log)
- * [tf.ceil(x, name=None)](#ceil)
- * [tf.floor(x, name=None)](#floor)
- * [tf.maximum(x, y, name=None)](#maximum)
- * [tf.minimum(x, y, name=None)](#minimum)
- * [tf.cos(x, name=None)](#cos)
- * [tf.sin(x, name=None)](#sin)
+ * [`tf.add_n(inputs, name=None)`](#add_n)
+ * [`tf.abs(x, name=None)`](#abs)
+ * [`tf.neg(x, name=None)`](#neg)
+ * [`tf.sign(x, name=None)`](#sign)
+ * [`tf.inv(x, name=None)`](#inv)
+ * [`tf.square(x, name=None)`](#square)
+ * [`tf.round(x, name=None)`](#round)
+ * [`tf.sqrt(x, name=None)`](#sqrt)
+ * [`tf.rsqrt(x, name=None)`](#rsqrt)
+ * [`tf.pow(x, y, name=None)`](#pow)
+ * [`tf.exp(x, name=None)`](#exp)
+ * [`tf.log(x, name=None)`](#log)
+ * [`tf.ceil(x, name=None)`](#ceil)
+ * [`tf.floor(x, name=None)`](#floor)
+ * [`tf.maximum(x, y, name=None)`](#maximum)
+ * [`tf.minimum(x, y, name=None)`](#minimum)
+ * [`tf.cos(x, name=None)`](#cos)
+ * [`tf.sin(x, name=None)`](#sin)
* [Matrix Math Functions](#AUTOGENERATED-matrix-math-functions)
- * [tf.diag(diagonal, name=None)](#diag)
- * [tf.transpose(a, perm=None, name='transpose')](#transpose)
- * [tf.matmul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None)](#matmul)
- * [tf.batch_matmul(x, y, adj_x=None, adj_y=None, name=None)](#batch_matmul)
- * [tf.matrix_determinant(input, name=None)](#matrix_determinant)
- * [tf.batch_matrix_determinant(input, name=None)](#batch_matrix_determinant)
- * [tf.matrix_inverse(input, name=None)](#matrix_inverse)
- * [tf.batch_matrix_inverse(input, name=None)](#batch_matrix_inverse)
- * [tf.cholesky(input, name=None)](#cholesky)
- * [tf.batch_cholesky(input, name=None)](#batch_cholesky)
+ * [`tf.diag(diagonal, name=None)`](#diag)
+ * [`tf.transpose(a, perm=None, name='transpose')`](#transpose)
+ * [`tf.matmul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None)`](#matmul)
+ * [`tf.batch_matmul(x, y, adj_x=None, adj_y=None, name=None)`](#batch_matmul)
+ * [`tf.matrix_determinant(input, name=None)`](#matrix_determinant)
+ * [`tf.batch_matrix_determinant(input, name=None)`](#batch_matrix_determinant)
+ * [`tf.matrix_inverse(input, name=None)`](#matrix_inverse)
+ * [`tf.batch_matrix_inverse(input, name=None)`](#batch_matrix_inverse)
+ * [`tf.cholesky(input, name=None)`](#cholesky)
+ * [`tf.batch_cholesky(input, name=None)`](#batch_cholesky)
* [Complex Number Functions](#AUTOGENERATED-complex-number-functions)
- * [tf.complex(real, imag, name=None)](#complex)
- * [tf.complex_abs(x, name=None)](#complex_abs)
- * [tf.conj(in_, name=None)](#conj)
- * [tf.imag(in_, name=None)](#imag)
- * [tf.real(in_, name=None)](#real)
+ * [`tf.complex(real, imag, name=None)`](#complex)
+ * [`tf.complex_abs(x, name=None)`](#complex_abs)
+ * [`tf.conj(in_, name=None)`](#conj)
+ * [`tf.imag(in_, name=None)`](#imag)
+ * [`tf.real(in_, name=None)`](#real)
* [Reduction](#AUTOGENERATED-reduction)
- * [tf.reduce_sum(input_tensor, reduction_indices=None, keep_dims=False, name=None)](#reduce_sum)
- * [tf.reduce_prod(input_tensor, reduction_indices=None, keep_dims=False, name=None)](#reduce_prod)
- * [tf.reduce_min(input_tensor, reduction_indices=None, keep_dims=False, name=None)](#reduce_min)
- * [tf.reduce_max(input_tensor, reduction_indices=None, keep_dims=False, name=None)](#reduce_max)
- * [tf.reduce_mean(input_tensor, reduction_indices=None, keep_dims=False, name=None)](#reduce_mean)
- * [tf.reduce_all(input_tensor, reduction_indices=None, keep_dims=False, name=None)](#reduce_all)
- * [tf.reduce_any(input_tensor, reduction_indices=None, keep_dims=False, name=None)](#reduce_any)
- * [tf.accumulate_n(inputs, shape=None, tensor_dtype=None, name=None)](#accumulate_n)
+ * [`tf.reduce_sum(input_tensor, reduction_indices=None, keep_dims=False, name=None)`](#reduce_sum)
+ * [`tf.reduce_prod(input_tensor, reduction_indices=None, keep_dims=False, name=None)`](#reduce_prod)
+ * [`tf.reduce_min(input_tensor, reduction_indices=None, keep_dims=False, name=None)`](#reduce_min)
+ * [`tf.reduce_max(input_tensor, reduction_indices=None, keep_dims=False, name=None)`](#reduce_max)
+ * [`tf.reduce_mean(input_tensor, reduction_indices=None, keep_dims=False, name=None)`](#reduce_mean)
+ * [`tf.reduce_all(input_tensor, reduction_indices=None, keep_dims=False, name=None)`](#reduce_all)
+ * [`tf.reduce_any(input_tensor, reduction_indices=None, keep_dims=False, name=None)`](#reduce_any)
+ * [`tf.accumulate_n(inputs, shape=None, tensor_dtype=None, name=None)`](#accumulate_n)
* [Segmentation](#AUTOGENERATED-segmentation)
- * [tf.segment_sum(data, segment_ids, name=None)](#segment_sum)
- * [tf.segment_prod(data, segment_ids, name=None)](#segment_prod)
- * [tf.segment_min(data, segment_ids, name=None)](#segment_min)
- * [tf.segment_max(data, segment_ids, name=None)](#segment_max)
- * [tf.segment_mean(data, segment_ids, name=None)](#segment_mean)
- * [tf.unsorted_segment_sum(data, segment_ids, num_segments, name=None)](#unsorted_segment_sum)
- * [tf.sparse_segment_sum(data, indices, segment_ids, name=None)](#sparse_segment_sum)
- * [tf.sparse_segment_mean(data, indices, segment_ids, name=None)](#sparse_segment_mean)
+ * [`tf.segment_sum(data, segment_ids, name=None)`](#segment_sum)
+ * [`tf.segment_prod(data, segment_ids, name=None)`](#segment_prod)
+ * [`tf.segment_min(data, segment_ids, name=None)`](#segment_min)
+ * [`tf.segment_max(data, segment_ids, name=None)`](#segment_max)
+ * [`tf.segment_mean(data, segment_ids, name=None)`](#segment_mean)
+ * [`tf.unsorted_segment_sum(data, segment_ids, num_segments, name=None)`](#unsorted_segment_sum)
+ * [`tf.sparse_segment_sum(data, indices, segment_ids, name=None)`](#sparse_segment_sum)
+ * [`tf.sparse_segment_mean(data, indices, segment_ids, name=None)`](#sparse_segment_mean)
* [Sequence Comparison and Indexing](#AUTOGENERATED-sequence-comparison-and-indexing)
- * [tf.argmin(input, dimension, name=None)](#argmin)
- * [tf.argmax(input, dimension, name=None)](#argmax)
- * [tf.listdiff(x, y, name=None)](#listdiff)
- * [tf.where(input, name=None)](#where)
- * [tf.unique(x, name=None)](#unique)
- * [tf.edit_distance(hypothesis, truth, normalize=True, name='edit_distance')](#edit_distance)
- * [tf.invert_permutation(x, name=None)](#invert_permutation)
+ * [`tf.argmin(input, dimension, name=None)`](#argmin)
+ * [`tf.argmax(input, dimension, name=None)`](#argmax)
+ * [`tf.listdiff(x, y, name=None)`](#listdiff)
+ * [`tf.where(input, name=None)`](#where)
+ * [`tf.unique(x, name=None)`](#unique)
+ * [`tf.edit_distance(hypothesis, truth, normalize=True, name='edit_distance')`](#edit_distance)
+ * [`tf.invert_permutation(x, name=None)`](#invert_permutation)
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
@@ -87,7 +87,7 @@ operators to your graph.
- - -
-### tf.add(x, y, name=None) <a class="md-anchor" id="add"></a>
+### `tf.add(x, y, name=None)` <a class="md-anchor" id="add"></a>
Returns x + y element-wise.
@@ -107,7 +107,7 @@ Returns x + y element-wise.
- - -
-### tf.sub(x, y, name=None) <a class="md-anchor" id="sub"></a>
+### `tf.sub(x, y, name=None)` <a class="md-anchor" id="sub"></a>
Returns x - y element-wise.
@@ -125,7 +125,7 @@ Returns x - y element-wise.
- - -
-### tf.mul(x, y, name=None) <a class="md-anchor" id="mul"></a>
+### `tf.mul(x, y, name=None)` <a class="md-anchor" id="mul"></a>
Returns x * y element-wise.
@@ -143,7 +143,7 @@ Returns x * y element-wise.
- - -
-### tf.div(x, y, name=None) <a class="md-anchor" id="div"></a>
+### `tf.div(x, y, name=None)` <a class="md-anchor" id="div"></a>
Returns x / y element-wise.
@@ -161,7 +161,7 @@ Returns x / y element-wise.
- - -
-### tf.mod(x, y, name=None) <a class="md-anchor" id="mod"></a>
+### `tf.mod(x, y, name=None)` <a class="md-anchor" id="mod"></a>
Returns element-wise remainder of division.
@@ -185,7 +185,7 @@ mathematical functions to your graph.
- - -
-### tf.add_n(inputs, name=None) <a class="md-anchor" id="add_n"></a>
+### `tf.add_n(inputs, name=None)` <a class="md-anchor" id="add_n"></a>
Add all input tensors element-wise.
@@ -203,7 +203,7 @@ Add all input tensors element wise.
- - -
-### tf.abs(x, name=None) <a class="md-anchor" id="abs"></a>
+### `tf.abs(x, name=None)` <a class="md-anchor" id="abs"></a>
Computes the absolute value of a tensor.
@@ -228,7 +228,7 @@ number.
- - -
-### tf.neg(x, name=None) <a class="md-anchor" id="neg"></a>
+### `tf.neg(x, name=None)` <a class="md-anchor" id="neg"></a>
Computes numerical negative value element-wise.
@@ -247,7 +247,7 @@ I.e., \\(y = -x\\).
- - -
-### tf.sign(x, name=None) <a class="md-anchor" id="sign"></a>
+### `tf.sign(x, name=None)` <a class="md-anchor" id="sign"></a>
Returns an element-wise indication of the sign of a number.
@@ -266,7 +266,7 @@ y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0.
- - -
-### tf.inv(x, name=None) <a class="md-anchor" id="inv"></a>
+### `tf.inv(x, name=None)` <a class="md-anchor" id="inv"></a>
Computes the reciprocal of x element-wise.
@@ -285,7 +285,7 @@ I.e., \\(y = 1 / x\\).
- - -
-### tf.square(x, name=None) <a class="md-anchor" id="square"></a>
+### `tf.square(x, name=None)` <a class="md-anchor" id="square"></a>
Computes square of x element-wise.
@@ -304,7 +304,7 @@ I.e., \\(y = x * x = x^2\\).
- - -
-### tf.round(x, name=None) <a class="md-anchor" id="round"></a>
+### `tf.round(x, name=None)` <a class="md-anchor" id="round"></a>
Rounds the values of a tensor to the nearest integer, element-wise.
@@ -328,7 +328,7 @@ tf.round(a) ==> [ 1.0, 3.0, 2.0, -4.0 ]
- - -
-### tf.sqrt(x, name=None) <a class="md-anchor" id="sqrt"></a>
+### `tf.sqrt(x, name=None)` <a class="md-anchor" id="sqrt"></a>
Computes square root of x element-wise.
@@ -347,7 +347,7 @@ I.e., \\(y = \sqrt{x} = x^{1/2}\\).
- - -
-### tf.rsqrt(x, name=None) <a class="md-anchor" id="rsqrt"></a>
+### `tf.rsqrt(x, name=None)` <a class="md-anchor" id="rsqrt"></a>
Computes reciprocal of square root of x element-wise.
@@ -366,7 +366,7 @@ I.e., \\(y = 1 / \sqrt{x}\\).
- - -
-### tf.pow(x, y, name=None) <a class="md-anchor" id="pow"></a>
+### `tf.pow(x, y, name=None)` <a class="md-anchor" id="pow"></a>
Computes the power of one value to another.
@@ -393,7 +393,7 @@ tf.pow(x, y) ==> [[256, 65536], [9, 27]]
- - -
-### tf.exp(x, name=None) <a class="md-anchor" id="exp"></a>
+### `tf.exp(x, name=None)` <a class="md-anchor" id="exp"></a>
Computes exponential of x element-wise. \\(y = e^x\\).
@@ -410,7 +410,7 @@ Computes exponential of x element-wise. \\(y = e^x\\).
- - -
-### tf.log(x, name=None) <a class="md-anchor" id="log"></a>
+### `tf.log(x, name=None)` <a class="md-anchor" id="log"></a>
Computes natural logarithm of x element-wise.
@@ -429,7 +429,7 @@ I.e., \\(y = \log_e x\\).
- - -
-### tf.ceil(x, name=None) <a class="md-anchor" id="ceil"></a>
+### `tf.ceil(x, name=None)` <a class="md-anchor" id="ceil"></a>
Returns element-wise smallest integer not less than x.
@@ -446,7 +446,7 @@ Returns element-wise smallest integer in not less than x.
- - -
-### tf.floor(x, name=None) <a class="md-anchor" id="floor"></a>
+### `tf.floor(x, name=None)` <a class="md-anchor" id="floor"></a>
Returns element-wise largest integer not greater than x.
@@ -463,7 +463,7 @@ Returns element-wise largest integer not greater than x.
- - -
-### tf.maximum(x, y, name=None) <a class="md-anchor" id="maximum"></a>
+### `tf.maximum(x, y, name=None)` <a class="md-anchor" id="maximum"></a>
Returns the max of x and y (i.e. x > y ? x : y) element-wise, broadcasts.
@@ -481,7 +481,7 @@ Returns the max of x and y (i.e. x > y ? x : y) element-wise, broadcasts.
- - -
-### tf.minimum(x, y, name=None) <a class="md-anchor" id="minimum"></a>
+### `tf.minimum(x, y, name=None)` <a class="md-anchor" id="minimum"></a>
Returns the min of x and y (i.e. x < y ? x : y) element-wise, broadcasts.
@@ -499,7 +499,7 @@ Returns the min of x and y (i.e. x < y ? x : y) element-wise, broadcasts.
- - -
-### tf.cos(x, name=None) <a class="md-anchor" id="cos"></a>
+### `tf.cos(x, name=None)` <a class="md-anchor" id="cos"></a>
Computes cos of x element-wise.
@@ -516,7 +516,7 @@ Computes cos of x element-wise.
- - -
-### tf.sin(x, name=None) <a class="md-anchor" id="sin"></a>
+### `tf.sin(x, name=None)` <a class="md-anchor" id="sin"></a>
Computes sin of x element-wise.
@@ -539,7 +539,7 @@ mathematical functions for matrices to your graph.
- - -
-### tf.diag(diagonal, name=None) <a class="md-anchor" id="diag"></a>
+### `tf.diag(diagonal, name=None)` <a class="md-anchor" id="diag"></a>
Returns a diagonal tensor with given diagonal values.
@@ -575,7 +575,7 @@ tf.diag(diagonal) ==> [[1, 0, 0, 0]
- - -
-### tf.transpose(a, perm=None, name='transpose') <a class="md-anchor" id="transpose"></a>
+### `tf.transpose(a, perm=None, name='transpose')` <a class="md-anchor" id="transpose"></a>
Transposes `a`. Permutes the dimensions according to `perm`.
@@ -628,7 +628,7 @@ tf.transpose(b, perm=[0, 2, 1]) ==> [[[1 4]
- - -
-### tf.matmul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None) <a class="md-anchor" id="matmul"></a>
+### `tf.matmul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None)` <a class="md-anchor" id="matmul"></a>
Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
@@ -677,7 +677,7 @@ c = tf.matmul(a, b) => [[58 64]
- - -
-### tf.batch_matmul(x, y, adj_x=None, adj_y=None, name=None) <a class="md-anchor" id="batch_matmul"></a>
+### `tf.batch_matmul(x, y, adj_x=None, adj_y=None, name=None)` <a class="md-anchor" id="batch_matmul"></a>
Multiplies slices of two tensors in batches.
@@ -722,7 +722,7 @@ It is computed as:
- - -
-### tf.matrix_determinant(input, name=None) <a class="md-anchor" id="matrix_determinant"></a>
+### `tf.matrix_determinant(input, name=None)` <a class="md-anchor" id="matrix_determinant"></a>
Calculates the determinant of a square matrix.
@@ -741,7 +741,7 @@ Calculates the determinant of a square matrix.
- - -
-### tf.batch_matrix_determinant(input, name=None) <a class="md-anchor" id="batch_matrix_determinant"></a>
+### `tf.batch_matrix_determinant(input, name=None)` <a class="md-anchor" id="batch_matrix_determinant"></a>
Calculates the determinants for a batch of square matrices.
@@ -764,7 +764,7 @@ for all input submatrices `[..., :, :]`.
- - -
-### tf.matrix_inverse(input, name=None) <a class="md-anchor" id="matrix_inverse"></a>
+### `tf.matrix_inverse(input, name=None)` <a class="md-anchor" id="matrix_inverse"></a>
Calculates the inverse of a square invertible matrix. Checks for invertibility.
@@ -783,7 +783,7 @@ Calculates the inverse of a square invertible matrix. Checks for invertibility.
- - -
-### tf.batch_matrix_inverse(input, name=None) <a class="md-anchor" id="batch_matrix_inverse"></a>
+### `tf.batch_matrix_inverse(input, name=None)` <a class="md-anchor" id="batch_matrix_inverse"></a>
Calculates the inverse of square invertible matrices. Checks for invertibility.
@@ -806,7 +806,7 @@ containing the inverse for all input submatrices `[..., :, :]`.
- - -
-### tf.cholesky(input, name=None) <a class="md-anchor" id="cholesky"></a>
+### `tf.cholesky(input, name=None)` <a class="md-anchor" id="cholesky"></a>
Calculates the Cholesky decomposition of a square matrix.
@@ -831,7 +831,7 @@ input.
- - -
-### tf.batch_cholesky(input, name=None) <a class="md-anchor" id="batch_cholesky"></a>
+### `tf.batch_cholesky(input, name=None)` <a class="md-anchor" id="batch_cholesky"></a>
Calculates the Cholesky decomposition of a batch of square matrices.
@@ -860,7 +860,7 @@ functions to your graph.
- - -
-### tf.complex(real, imag, name=None) <a class="md-anchor" id="complex"></a>
+### `tf.complex(real, imag, name=None)` <a class="md-anchor" id="complex"></a>
Converts two real numbers to a complex number.
@@ -893,7 +893,7 @@ tf.complex(real, imag) ==> [[2.25 + 4.74j], [3.25 + 5.75j]]
- - -
-### tf.complex_abs(x, name=None) <a class="md-anchor" id="complex_abs"></a>
+### `tf.complex_abs(x, name=None)` <a class="md-anchor" id="complex_abs"></a>
Computes the complex absolute value of a tensor.
@@ -922,7 +922,7 @@ tf.complex_abs(x) ==> [5.25594902, 6.60492229]
- - -
-### tf.conj(in_, name=None) <a class="md-anchor" id="conj"></a>
+### `tf.conj(in_, name=None)` <a class="md-anchor" id="conj"></a>
Returns the complex conjugate of a complex number.
@@ -953,7 +953,7 @@ tf.conj(in) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
- - -
-### tf.imag(in_, name=None) <a class="md-anchor" id="imag"></a>
+### `tf.imag(in_, name=None)` <a class="md-anchor" id="imag"></a>
Returns the imaginary part of a complex number.
@@ -982,7 +982,7 @@ tf.imag(in) ==> [4.75, 5.75]
- - -
-### tf.real(in_, name=None) <a class="md-anchor" id="real"></a>
+### `tf.real(in_, name=None)` <a class="md-anchor" id="real"></a>
Returns the real part of a complex number.
@@ -1017,7 +1017,7 @@ common math computations that reduce various dimensions of a tensor.
- - -
-### tf.reduce_sum(input_tensor, reduction_indices=None, keep_dims=False, name=None) <a class="md-anchor" id="reduce_sum"></a>
+### `tf.reduce_sum(input_tensor, reduction_indices=None, keep_dims=False, name=None)` <a class="md-anchor" id="reduce_sum"></a>
Computes the sum of elements across dimensions of a tensor.
@@ -1057,7 +1057,7 @@ tf.reduce_sum(x, [0, 1]) ==> 6
- - -
-### tf.reduce_prod(input_tensor, reduction_indices=None, keep_dims=False, name=None) <a class="md-anchor" id="reduce_prod"></a>
+### `tf.reduce_prod(input_tensor, reduction_indices=None, keep_dims=False, name=None)` <a class="md-anchor" id="reduce_prod"></a>
Computes the product of elements across dimensions of a tensor.
@@ -1085,7 +1085,7 @@ tensor with a single element is returned.
- - -
-### tf.reduce_min(input_tensor, reduction_indices=None, keep_dims=False, name=None) <a class="md-anchor" id="reduce_min"></a>
+### `tf.reduce_min(input_tensor, reduction_indices=None, keep_dims=False, name=None)` <a class="md-anchor" id="reduce_min"></a>
Computes the minimum of elements across dimensions of a tensor.
@@ -1113,7 +1113,7 @@ tensor with a single element is returned.
- - -
-### tf.reduce_max(input_tensor, reduction_indices=None, keep_dims=False, name=None) <a class="md-anchor" id="reduce_max"></a>
+### `tf.reduce_max(input_tensor, reduction_indices=None, keep_dims=False, name=None)` <a class="md-anchor" id="reduce_max"></a>
Computes the maximum of elements across dimensions of a tensor.
@@ -1141,7 +1141,7 @@ tensor with a single element is returned.
- - -
-### tf.reduce_mean(input_tensor, reduction_indices=None, keep_dims=False, name=None) <a class="md-anchor" id="reduce_mean"></a>
+### `tf.reduce_mean(input_tensor, reduction_indices=None, keep_dims=False, name=None)` <a class="md-anchor" id="reduce_mean"></a>
Computes the mean of elements across dimensions of a tensor.
@@ -1179,7 +1179,7 @@ tf.reduce_mean(x, 1) ==> [1., 2.]
- - -
-### tf.reduce_all(input_tensor, reduction_indices=None, keep_dims=False, name=None) <a class="md-anchor" id="reduce_all"></a>
+### `tf.reduce_all(input_tensor, reduction_indices=None, keep_dims=False, name=None)` <a class="md-anchor" id="reduce_all"></a>
Computes the "logical and" of elements across dimensions of a tensor.
@@ -1217,7 +1217,7 @@ tf.reduce_all(x, 1) ==> [True, False]
- - -
-### tf.reduce_any(input_tensor, reduction_indices=None, keep_dims=False, name=None) <a class="md-anchor" id="reduce_any"></a>
+### `tf.reduce_any(input_tensor, reduction_indices=None, keep_dims=False, name=None)` <a class="md-anchor" id="reduce_any"></a>
Computes the "logical or" of elements across dimensions of a tensor.
@@ -1256,7 +1256,7 @@ tf.reduce_any(x, 1) ==> [True, False]
- - -
-### tf.accumulate_n(inputs, shape=None, tensor_dtype=None, name=None) <a class="md-anchor" id="accumulate_n"></a>
+### `tf.accumulate_n(inputs, shape=None, tensor_dtype=None, name=None)` <a class="md-anchor" id="accumulate_n"></a>
Returns the element-wise sum of a list of tensors.
@@ -1318,7 +1318,7 @@ tf.segment_sum(c, tf.constant([0, 0, 1]))
- - -
-### tf.segment_sum(data, segment_ids, name=None) <a class="md-anchor" id="segment_sum"></a>
+### `tf.segment_sum(data, segment_ids, name=None)` <a class="md-anchor" id="segment_sum"></a>
Computes the sum along segments of a tensor.
@@ -1351,7 +1351,7 @@ that `segment_ids[j] == i`.
- - -
-### tf.segment_prod(data, segment_ids, name=None) <a class="md-anchor" id="segment_prod"></a>
+### `tf.segment_prod(data, segment_ids, name=None)` <a class="md-anchor" id="segment_prod"></a>
Computes the product along segments of a tensor.
@@ -1384,7 +1384,7 @@ that `segment_ids[j] == i`.
- - -
-### tf.segment_min(data, segment_ids, name=None) <a class="md-anchor" id="segment_min"></a>
+### `tf.segment_min(data, segment_ids, name=None)` <a class="md-anchor" id="segment_min"></a>
Computes the minimum along segments of a tensor.
@@ -1417,7 +1417,7 @@ that `segment_ids[j] == i`.
- - -
-### tf.segment_max(data, segment_ids, name=None) <a class="md-anchor" id="segment_max"></a>
+### `tf.segment_max(data, segment_ids, name=None)` <a class="md-anchor" id="segment_max"></a>
Computes the maximum along segments of a tensor.
@@ -1450,7 +1450,7 @@ that `segment_ids[j] == i`.
- - -
-### tf.segment_mean(data, segment_ids, name=None) <a class="md-anchor" id="segment_mean"></a>
+### `tf.segment_mean(data, segment_ids, name=None)` <a class="md-anchor" id="segment_mean"></a>
Computes the mean along segments of a tensor.
@@ -1485,7 +1485,7 @@ values summed.
- - -
-### tf.unsorted_segment_sum(data, segment_ids, num_segments, name=None) <a class="md-anchor" id="unsorted_segment_sum"></a>
+### `tf.unsorted_segment_sum(data, segment_ids, num_segments, name=None)` <a class="md-anchor" id="unsorted_segment_sum"></a>
Computes the sum along segments of a tensor.
@@ -1526,7 +1526,7 @@ If the sum is empty for a given segment ID `i`, `output[i] = 0`.
- - -
-### tf.sparse_segment_sum(data, indices, segment_ids, name=None) <a class="md-anchor" id="sparse_segment_sum"></a>
+### `tf.sparse_segment_sum(data, indices, segment_ids, name=None)` <a class="md-anchor" id="sparse_segment_sum"></a>
Computes the sum along sparse segments of a tensor.
@@ -1578,7 +1578,7 @@ tf.segment_sum(c, tf.constant([0, 0, 1]))
- - -
-### tf.sparse_segment_mean(data, indices, segment_ids, name=None) <a class="md-anchor" id="sparse_segment_mean"></a>
+### `tf.sparse_segment_mean(data, indices, segment_ids, name=None)` <a class="md-anchor" id="sparse_segment_mean"></a>
Computes the mean along sparse segments of a tensor.
@@ -1616,7 +1616,7 @@ a tensor.
- - -
-### tf.argmin(input, dimension, name=None) <a class="md-anchor" id="argmin"></a>
+### `tf.argmin(input, dimension, name=None)` <a class="md-anchor" id="argmin"></a>
Returns the index with the smallest value across dimensions of a tensor.
@@ -1636,7 +1636,7 @@ Returns the index with the smallest value across dimensions of a tensor.
- - -
-### tf.argmax(input, dimension, name=None) <a class="md-anchor" id="argmax"></a>
+### `tf.argmax(input, dimension, name=None)` <a class="md-anchor" id="argmax"></a>
Returns the index with the largest value across dimensions of a tensor.
@@ -1657,7 +1657,7 @@ Returns the index with the largest value across dimensions of a tensor.
- - -
-### tf.listdiff(x, y, name=None) <a class="md-anchor" id="listdiff"></a>
+### `tf.listdiff(x, y, name=None)` <a class="md-anchor" id="listdiff"></a>
Computes the difference between two lists of numbers.
@@ -1700,7 +1700,7 @@ idx ==> [1, 3, 5]
- - -
-### tf.where(input, name=None) <a class="md-anchor" id="where"></a>
+### `tf.where(input, name=None)` <a class="md-anchor" id="where"></a>
Returns locations of true values in a boolean tensor.
@@ -1749,7 +1749,7 @@ where(input) ==> [[0, 0, 0],
- - -
-### tf.unique(x, name=None) <a class="md-anchor" id="unique"></a>
+### `tf.unique(x, name=None)` <a class="md-anchor" id="unique"></a>
Finds unique elements in a 1-D tensor.
@@ -1786,7 +1786,7 @@ idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
- - -
-### tf.edit_distance(hypothesis, truth, normalize=True, name='edit_distance') <a class="md-anchor" id="edit_distance"></a>
+### `tf.edit_distance(hypothesis, truth, normalize=True, name='edit_distance')` <a class="md-anchor" id="edit_distance"></a>
Computes the Levenshtein distance between sequences.
@@ -1855,7 +1855,7 @@ output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
- - -
-### tf.invert_permutation(x, name=None) <a class="md-anchor" id="invert_permutation"></a>
+### `tf.invert_permutation(x, name=None)` <a class="md-anchor" id="invert_permutation"></a>
Computes the inverse permutation of a tensor.
diff --git a/tensorflow/g3doc/api_docs/python/nn.md b/tensorflow/g3doc/api_docs/python/nn.md
index 11c820847d..ada49a6b11 100644
--- a/tensorflow/g3doc/api_docs/python/nn.md
+++ b/tensorflow/g3doc/api_docs/python/nn.md
@@ -9,47 +9,47 @@ accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
## Contents
### [Neural Network](#AUTOGENERATED-neural-network)
* [Activation Functions](#AUTOGENERATED-activation-functions)
- * [tf.nn.relu(features, name=None)](#relu)
- * [tf.nn.relu6(features, name=None)](#relu6)
- * [tf.nn.softplus(features, name=None)](#softplus)
- * [tf.nn.dropout(x, keep_prob, noise_shape=None, seed=None, name=None)](#dropout)
- * [tf.nn.bias_add(value, bias, name=None)](#bias_add)
- * [tf.sigmoid(x, name=None)](#sigmoid)
- * [tf.tanh(x, name=None)](#tanh)
+ * [`tf.nn.relu(features, name=None)`](#relu)
+ * [`tf.nn.relu6(features, name=None)`](#relu6)
+ * [`tf.nn.softplus(features, name=None)`](#softplus)
+ * [`tf.nn.dropout(x, keep_prob, noise_shape=None, seed=None, name=None)`](#dropout)
+ * [`tf.nn.bias_add(value, bias, name=None)`](#bias_add)
+ * [`tf.sigmoid(x, name=None)`](#sigmoid)
+ * [`tf.tanh(x, name=None)`](#tanh)
* [Convolution](#AUTOGENERATED-convolution)
- * [tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, name=None)](#conv2d)
- * [tf.nn.depthwise_conv2d(input, filter, strides, padding, name=None)](#depthwise_conv2d)
- * [tf.nn.separable_conv2d(input, depthwise_filter, pointwise_filter, strides, padding, name=None)](#separable_conv2d)
+ * [`tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, name=None)`](#conv2d)
+ * [`tf.nn.depthwise_conv2d(input, filter, strides, padding, name=None)`](#depthwise_conv2d)
+ * [`tf.nn.separable_conv2d(input, depthwise_filter, pointwise_filter, strides, padding, name=None)`](#separable_conv2d)
* [Pooling](#AUTOGENERATED-pooling)
- * [tf.nn.avg_pool(value, ksize, strides, padding, name=None)](#avg_pool)
- * [tf.nn.max_pool(value, ksize, strides, padding, name=None)](#max_pool)
- * [tf.nn.max_pool_with_argmax(input, ksize, strides, padding, Targmax=None, name=None)](#max_pool_with_argmax)
+ * [`tf.nn.avg_pool(value, ksize, strides, padding, name=None)`](#avg_pool)
+ * [`tf.nn.max_pool(value, ksize, strides, padding, name=None)`](#max_pool)
+ * [`tf.nn.max_pool_with_argmax(input, ksize, strides, padding, Targmax=None, name=None)`](#max_pool_with_argmax)
* [Normalization](#AUTOGENERATED-normalization)
- * [tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None)](#l2_normalize)
- * [tf.nn.local_response_normalization(input, depth_radius=None, bias=None, alpha=None, beta=None, name=None)](#local_response_normalization)
- * [tf.nn.moments(x, axes, name=None)](#moments)
+ * [`tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None)`](#l2_normalize)
+ * [`tf.nn.local_response_normalization(input, depth_radius=None, bias=None, alpha=None, beta=None, name=None)`](#local_response_normalization)
+ * [`tf.nn.moments(x, axes, name=None)`](#moments)
* [Losses](#AUTOGENERATED-losses)
- * [tf.nn.l2_loss(t, name=None)](#l2_loss)
+ * [`tf.nn.l2_loss(t, name=None)`](#l2_loss)
* [Classification](#AUTOGENERATED-classification)
- * [tf.nn.sigmoid_cross_entropy_with_logits(logits, targets, name=None)](#sigmoid_cross_entropy_with_logits)
- * [tf.nn.softmax(logits, name=None)](#softmax)
- * [tf.nn.softmax_cross_entropy_with_logits(logits, labels, name=None)](#softmax_cross_entropy_with_logits)
+ * [`tf.nn.sigmoid_cross_entropy_with_logits(logits, targets, name=None)`](#sigmoid_cross_entropy_with_logits)
+ * [`tf.nn.softmax(logits, name=None)`](#softmax)
+ * [`tf.nn.softmax_cross_entropy_with_logits(logits, labels, name=None)`](#softmax_cross_entropy_with_logits)
* [Embeddings](#AUTOGENERATED-embeddings)
- * [tf.nn.embedding_lookup(params, ids, name=None)](#embedding_lookup)
+ * [`tf.nn.embedding_lookup(params, ids, name=None)`](#embedding_lookup)
* [Evaluation](#AUTOGENERATED-evaluation)
- * [tf.nn.top_k(input, k, name=None)](#top_k)
- * [tf.nn.in_top_k(predictions, targets, k, name=None)](#in_top_k)
+ * [`tf.nn.top_k(input, k, name=None)`](#top_k)
+ * [`tf.nn.in_top_k(predictions, targets, k, name=None)`](#in_top_k)
* [Candidate Sampling](#AUTOGENERATED-candidate-sampling)
* [Sampled Loss Functions](#AUTOGENERATED-sampled-loss-functions)
- * [tf.nn.nce_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=False, name='nce_loss')](#nce_loss)
- * [tf.nn.sampled_softmax_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=True, name='sampled_softmax_loss')](#sampled_softmax_loss)
+ * [`tf.nn.nce_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=False, name='nce_loss')`](#nce_loss)
+ * [`tf.nn.sampled_softmax_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=True, name='sampled_softmax_loss')`](#sampled_softmax_loss)
* [Candidate Samplers](#AUTOGENERATED-candidate-samplers)
- * [tf.nn.uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None)](#uniform_candidate_sampler)
- * [tf.nn.log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None)](#log_uniform_candidate_sampler)
- * [tf.nn.learned_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None)](#learned_unigram_candidate_sampler)
- * [tf.nn.fixed_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, vocab_file='', distortion=0.0, num_reserved_ids=0, num_shards=1, shard=0, unigrams=[], seed=None, name=None)](#fixed_unigram_candidate_sampler)
+ * [`tf.nn.uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None)`](#uniform_candidate_sampler)
+ * [`tf.nn.log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None)`](#log_uniform_candidate_sampler)
+ * [`tf.nn.learned_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None)`](#learned_unigram_candidate_sampler)
+ * [`tf.nn.fixed_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, vocab_file='', distortion=0.0, num_reserved_ids=0, num_shards=1, shard=0, unigrams=[], seed=None, name=None)`](#fixed_unigram_candidate_sampler)
* [Miscellaneous candidate sampling utilities](#AUTOGENERATED-miscellaneous-candidate-sampling-utilities)
- * [tf.nn.compute_accidental_hits(true_classes, sampled_candidates, num_true, seed=None, name=None)](#compute_accidental_hits)
+ * [`tf.nn.compute_accidental_hits(true_classes, sampled_candidates, num_true, seed=None, name=None)`](#compute_accidental_hits)
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
@@ -67,7 +67,7 @@ shape as the input tensor.
- - -
-### tf.nn.relu(features, name=None) <a class="md-anchor" id="relu"></a>
+### `tf.nn.relu(features, name=None)` <a class="md-anchor" id="relu"></a>
Computes rectified linear: `max(features, 0)`.
@@ -84,7 +84,7 @@ Computes rectified linear: `max(features, 0)`.
- - -
-### tf.nn.relu6(features, name=None) <a class="md-anchor" id="relu6"></a>
+### `tf.nn.relu6(features, name=None)` <a class="md-anchor" id="relu6"></a>
Computes Rectified Linear 6: `min(max(features, 0), 6)`.
@@ -102,7 +102,7 @@ Computes Rectified Linear 6: `min(max(features, 0), 6)`.
- - -
-### tf.nn.softplus(features, name=None) <a class="md-anchor" id="softplus"></a>
+### `tf.nn.softplus(features, name=None)` <a class="md-anchor" id="softplus"></a>
Computes softplus: `log(exp(features) + 1)`.
@@ -119,7 +119,7 @@ Computes softplus: `log(exp(features) + 1)`.
- - -
-### tf.nn.dropout(x, keep_prob, noise_shape=None, seed=None, name=None) <a class="md-anchor" id="dropout"></a>
+### `tf.nn.dropout(x, keep_prob, noise_shape=None, seed=None, name=None)` <a class="md-anchor" id="dropout"></a>
Computes dropout.
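A minimal sketch of the two activation ops above, following the signatures listed in the TOC; the input values and keep probability are made up for illustration:

```python
import tensorflow as tf

x = tf.constant([[-1.0, 2.0, -3.0, 4.0]])
h = tf.nn.relu(x)                    # element-wise max(x, 0)
d = tf.nn.dropout(h, keep_prob=0.5)  # zeroes elements with prob 0.5, scales the rest by 1/0.5

with tf.Session() as sess:
    print(sess.run(d))
```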
@@ -158,7 +158,7 @@ kept independently and each row and column will be kept or not kept together.
- - -
-### tf.nn.bias_add(value, bias, name=None) <a class="md-anchor" id="bias_add"></a>
+### `tf.nn.bias_add(value, bias, name=None)` <a class="md-anchor" id="bias_add"></a>
Adds `bias` to `value`.
@@ -184,7 +184,7 @@ case where both types are quantized.
- - -
-### tf.sigmoid(x, name=None) <a class="md-anchor" id="sigmoid"></a>
+### `tf.sigmoid(x, name=None)` <a class="md-anchor" id="sigmoid"></a>
Computes sigmoid of `x` element-wise.
@@ -205,7 +205,7 @@ Specifically, `y = 1 / (1 + exp(-x))`.
- - -
-### tf.tanh(x, name=None) <a class="md-anchor" id="tanh"></a>
+### `tf.tanh(x, name=None)` <a class="md-anchor" id="tanh"></a>
Computes hyperbolic tangent of `x` element-wise.
@@ -270,7 +270,7 @@ In the formula for `shape(output)`, the rounding direction depends on padding:
- - -
-### tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, name=None) <a class="md-anchor" id="conv2d"></a>
+### `tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, name=None)` <a class="md-anchor" id="conv2d"></a>
Computes a 2-D convolution given 4-D `input` and `filter` tensors.
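A short sketch against the `conv2d` signature above; the batch, image, and filter shapes are assumed toy values (NHWC input, `[height, width, in_channels, out_channels]` filter):

```python
import tensorflow as tf

images = tf.random_normal([8, 28, 28, 1])   # assumed: 8 grayscale 28x28 images
filters = tf.random_normal([5, 5, 1, 32])   # assumed: 32 filters of size 5x5

conv = tf.nn.conv2d(images, filters, strides=[1, 1, 1, 1], padding='SAME')
biased = tf.nn.bias_add(conv, tf.zeros([32]))  # per-channel bias, as documented above
```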
@@ -316,7 +316,7 @@ horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
- - -
-### tf.nn.depthwise_conv2d(input, filter, strides, padding, name=None) <a class="md-anchor" id="depthwise_conv2d"></a>
+### `tf.nn.depthwise_conv2d(input, filter, strides, padding, name=None)` <a class="md-anchor" id="depthwise_conv2d"></a>
Depthwise 2-D convolution.
@@ -356,7 +356,7 @@ same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
- - -
-### tf.nn.separable_conv2d(input, depthwise_filter, pointwise_filter, strides, padding, name=None) <a class="md-anchor" id="separable_conv2d"></a>
+### `tf.nn.separable_conv2d(input, depthwise_filter, pointwise_filter, strides, padding, name=None)` <a class="md-anchor" id="separable_conv2d"></a>
2-D convolution with separable filters.
@@ -421,7 +421,7 @@ where the rounding direction depends on padding:
- - -
-### tf.nn.avg_pool(value, ksize, strides, padding, name=None) <a class="md-anchor" id="avg_pool"></a>
+### `tf.nn.avg_pool(value, ksize, strides, padding, name=None)` <a class="md-anchor" id="avg_pool"></a>
Performs the average pooling on the input.
@@ -448,7 +448,7 @@ window in `value`.
- - -
-### tf.nn.max_pool(value, ksize, strides, padding, name=None) <a class="md-anchor" id="max_pool"></a>
+### `tf.nn.max_pool(value, ksize, strides, padding, name=None)` <a class="md-anchor" id="max_pool"></a>
Performs the max pooling on the input.
@@ -471,7 +471,7 @@ Performs the max pooling on the input.
- - -
-### tf.nn.max_pool_with_argmax(input, ksize, strides, padding, Targmax=None, name=None) <a class="md-anchor" id="max_pool_with_argmax"></a>
+### `tf.nn.max_pool_with_argmax(input, ksize, strides, padding, Targmax=None, name=None)` <a class="md-anchor" id="max_pool_with_argmax"></a>
Performs max pooling on the input and outputs both max values and indices.
@@ -510,7 +510,7 @@ have varying scale, and to aid generalization.
- - -
-### tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None) <a class="md-anchor" id="l2_normalize"></a>
+### `tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None)` <a class="md-anchor" id="l2_normalize"></a>
Normalizes along dimension `dim` using an L2 norm.
@@ -537,7 +537,7 @@ dimension `dim`.
- - -
-### tf.nn.local_response_normalization(input, depth_radius=None, bias=None, alpha=None, beta=None, name=None) <a class="md-anchor" id="local_response_normalization"></a>
+### `tf.nn.local_response_normalization(input, depth_radius=None, bias=None, alpha=None, beta=None, name=None)` <a class="md-anchor" id="local_response_normalization"></a>
Local Response Normalization.
@@ -574,7 +574,7 @@ convolutional neural networks (NIPS 2012)]
- - -
-### tf.nn.moments(x, axes, name=None) <a class="md-anchor" id="moments"></a>
+### `tf.nn.moments(x, axes, name=None)` <a class="md-anchor" id="moments"></a>
Calculate the mean and variance of `x`.
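A quick sketch of `moments` with made-up values, computing per-column statistics:

```python
import tensorflow as tf

x = tf.constant([[1.0, 2.0],
                 [3.0, 5.0]])
mean, variance = tf.nn.moments(x, axes=[0])  # reduce over the batch dimension

with tf.Session() as sess:
    print(sess.run([mean, variance]))  # [[2.0, 3.5], [1.0, 2.25]]
```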
@@ -608,7 +608,7 @@ or for regularization purposes (weight decay).
- - -
-### tf.nn.l2_loss(t, name=None) <a class="md-anchor" id="l2_loss"></a>
+### `tf.nn.l2_loss(t, name=None)` <a class="md-anchor" id="l2_loss"></a>
L2 Loss.
@@ -635,7 +635,7 @@ TensorFlow provides several operations that help you perform classification.
- - -
-### tf.nn.sigmoid_cross_entropy_with_logits(logits, targets, name=None) <a class="md-anchor" id="sigmoid_cross_entropy_with_logits"></a>
+### `tf.nn.sigmoid_cross_entropy_with_logits(logits, targets, name=None)` <a class="md-anchor" id="sigmoid_cross_entropy_with_logits"></a>
Computes sigmoid cross entropy given `logits`.
@@ -669,7 +669,7 @@ To ensure stability and avoid overflow, the implementation uses
- - -
-### tf.nn.softmax(logits, name=None) <a class="md-anchor" id="softmax"></a>
+### `tf.nn.softmax(logits, name=None)` <a class="md-anchor" id="softmax"></a>
Computes softmax activations.
@@ -691,7 +691,7 @@ For each batch `i` and class `j` we have
- - -
-### tf.nn.softmax_cross_entropy_with_logits(logits, labels, name=None) <a class="md-anchor" id="softmax_cross_entropy_with_logits"></a>
+### `tf.nn.softmax_cross_entropy_with_logits(logits, labels, name=None)` <a class="md-anchor" id="softmax_cross_entropy_with_logits"></a>
Computes softmax cross entropy between `logits` and `labels`.
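A minimal sketch combining the two classification ops above; the logits and one-hot labels are made up:

```python
import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1],
                      [0.5, 2.5, 0.3]])   # batch of 2 examples, 3 classes
labels = tf.constant([[1.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0]])

probs = tf.nn.softmax(logits)
xent = tf.nn.softmax_cross_entropy_with_logits(logits, labels)  # one loss per example
loss = tf.reduce_mean(xent)
```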
@@ -728,7 +728,7 @@ tensors.
- - -
-### tf.nn.embedding_lookup(params, ids, name=None) <a class="md-anchor" id="embedding_lookup"></a>
+### `tf.nn.embedding_lookup(params, ids, name=None)` <a class="md-anchor" id="embedding_lookup"></a>
Looks up `ids` in a list of embedding tensors.
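A tiny sketch of `embedding_lookup`, using a made-up 3x2 embedding table:

```python
import tensorflow as tf

params = tf.constant([[1.0, 2.0],
                      [3.0, 4.0],
                      [5.0, 6.0]])
ids = tf.constant([0, 2])
emb = tf.nn.embedding_lookup(params, ids)  # -> [[1.0, 2.0], [5.0, 6.0]]
```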
@@ -770,7 +770,7 @@ Since they are nondifferentiable, they are typically used at evaluation time.
- - -
-### tf.nn.top_k(input, k, name=None) <a class="md-anchor" id="top_k"></a>
+### `tf.nn.top_k(input, k, name=None)` <a class="md-anchor" id="top_k"></a>
Returns the values and indices of the k largest elements for each row.
@@ -800,7 +800,7 @@ elements are equal, the lower-index element appears first.
- - -
-### tf.nn.in_top_k(predictions, targets, k, name=None) <a class="md-anchor" id="in_top_k"></a>
+### `tf.nn.in_top_k(predictions, targets, k, name=None)` <a class="md-anchor" id="in_top_k"></a>
Says whether the targets are in the top K predictions.
@@ -852,7 +852,7 @@ TensorFlow provides the following sampled loss functions for faster training.
- - -
-### tf.nn.nce_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=False, name='nce_loss') <a class="md-anchor" id="nce_loss"></a>
+### `tf.nn.nce_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=False, name='nce_loss')` <a class="md-anchor" id="nce_loss"></a>
Computes and returns the noise-contrastive estimation training loss.
@@ -903,7 +903,7 @@ with an otherwise unused class.
- - -
-### tf.nn.sampled_softmax_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=True, name='sampled_softmax_loss') <a class="md-anchor" id="sampled_softmax_loss"></a>
+### `tf.nn.sampled_softmax_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=True, name='sampled_softmax_loss')` <a class="md-anchor" id="sampled_softmax_loss"></a>
Computes and returns the sampled softmax training loss.
@@ -955,7 +955,7 @@ classes when using one of the sampled loss functions above.
- - -
-### tf.nn.uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None) <a class="md-anchor" id="uniform_candidate_sampler"></a>
+### `tf.nn.uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None)` <a class="md-anchor" id="uniform_candidate_sampler"></a>
Samples a set of classes using a uniform base distribution.
@@ -1007,7 +1007,7 @@ compute them approximately.
- - -
-### tf.nn.log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None) <a class="md-anchor" id="log_uniform_candidate_sampler"></a>
+### `tf.nn.log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None)` <a class="md-anchor" id="log_uniform_candidate_sampler"></a>
Samples a set of classes using a log-uniform (Zipfian) base distribution.
@@ -1066,7 +1066,7 @@ compute them approximately.
- - -
-### tf.nn.learned_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None) <a class="md-anchor" id="learned_unigram_candidate_sampler"></a>
+### `tf.nn.learned_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=None, name=None)` <a class="md-anchor" id="learned_unigram_candidate_sampler"></a>
Samples a set of classes from a distribution learned during training.
@@ -1122,7 +1122,7 @@ compute them approximately.
- - -
-### tf.nn.fixed_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, vocab_file='', distortion=0.0, num_reserved_ids=0, num_shards=1, shard=0, unigrams=[], seed=None, name=None) <a class="md-anchor" id="fixed_unigram_candidate_sampler"></a>
+### `tf.nn.fixed_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, vocab_file='', distortion=0.0, num_reserved_ids=0, num_shards=1, shard=0, unigrams=[], seed=None, name=None)` <a class="md-anchor" id="fixed_unigram_candidate_sampler"></a>
Samples a set of classes using the provided (fixed) base distribution.
@@ -1203,7 +1203,7 @@ compute them approximately.
- - -
-### tf.nn.compute_accidental_hits(true_classes, sampled_candidates, num_true, seed=None, name=None) <a class="md-anchor" id="compute_accidental_hits"></a>
+### `tf.nn.compute_accidental_hits(true_classes, sampled_candidates, num_true, seed=None, name=None)` <a class="md-anchor" id="compute_accidental_hits"></a>
Compute the ids of positions in sampled_candidates matching true_classes.
diff --git a/tensorflow/g3doc/api_docs/python/python_io.md b/tensorflow/g3doc/api_docs/python/python_io.md
index df3c325454..3d52a358e1 100644
--- a/tensorflow/g3doc/api_docs/python/python_io.md
+++ b/tensorflow/g3doc/api_docs/python/python_io.md
@@ -6,7 +6,7 @@
### [Data IO (Python functions)](#AUTOGENERATED-data-io--python-functions-)
* [Data IO (Python Functions)](#AUTOGENERATED-data-io--python-functions-)
* [class tf.python_io.TFRecordWriter](#TFRecordWriter)
- * [tf.python_io.tf_record_iterator(path)](#tf_record_iterator)
+ * [`tf.python_io.tf_record_iterator(path)`](#tf_record_iterator)
* [TFRecords Format Details](#AUTOGENERATED-tfrecords-format-details)
@@ -29,7 +29,7 @@ in `with` blocks like a normal file.
- - -
-#### tf.python_io.TFRecordWriter.__init__(path) <a class="md-anchor" id="TFRecordWriter.__init__"></a>
+#### `tf.python_io.TFRecordWriter.__init__(path)` <a class="md-anchor" id="TFRecordWriter.__init__"></a>
Opens file `path` and creates a `TFRecordWriter` writing to it.
@@ -46,7 +46,7 @@ Opens file `path` and creates a `TFRecordWriter` writing to it.
- - -
-#### tf.python_io.TFRecordWriter.write(record) <a class="md-anchor" id="TFRecordWriter.write"></a>
+#### `tf.python_io.TFRecordWriter.write(record)` <a class="md-anchor" id="TFRecordWriter.write"></a>
Write a string record to the file.
@@ -58,7 +58,7 @@ Write a string record to the file.
- - -
-#### tf.python_io.TFRecordWriter.close() <a class="md-anchor" id="TFRecordWriter.close"></a>
+#### `tf.python_io.TFRecordWriter.close()` <a class="md-anchor" id="TFRecordWriter.close"></a>
Close the file.
@@ -66,7 +66,7 @@ Close the file.
- - -
-### tf.python_io.tf_record_iterator(path) <a class="md-anchor" id="tf_record_iterator"></a>
+### `tf.python_io.tf_record_iterator(path)` <a class="md-anchor" id="tf_record_iterator"></a>
An iterator that reads the records from a TFRecords file.
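A minimal round-trip sketch using the `TFRecordWriter` and `tf_record_iterator` signatures above; the file path is an assumed scratch location:

```python
import tensorflow as tf

path = '/tmp/example.tfrecords'  # assumed scratch path

# Write a couple of raw string records.
writer = tf.python_io.TFRecordWriter(path)
for record in [b'first record', b'second record']:
    writer.write(record)
writer.close()

# Iterate over them again.
for record in tf.python_io.tf_record_iterator(path):
    print(record)
```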
diff --git a/tensorflow/g3doc/api_docs/python/sparse_ops.md b/tensorflow/g3doc/api_docs/python/sparse_ops.md
index d19c37e30c..7d338f2efb 100644
--- a/tensorflow/g3doc/api_docs/python/sparse_ops.md
+++ b/tensorflow/g3doc/api_docs/python/sparse_ops.md
@@ -12,14 +12,14 @@ accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
* [class tf.SparseTensor](#SparseTensor)
* [class tf.SparseTensorValue](#SparseTensorValue)
* [Sparse to Dense Conversion](#AUTOGENERATED-sparse-to-dense-conversion)
- * [tf.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value, name=None)](#sparse_to_dense)
- * [tf.sparse_tensor_to_dense(sp_input, default_value, name=None)](#sparse_tensor_to_dense)
- * [tf.sparse_to_indicator(sp_input, vocab_size, name=None)](#sparse_to_indicator)
+ * [`tf.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value, name=None)`](#sparse_to_dense)
+ * [`tf.sparse_tensor_to_dense(sp_input, default_value, name=None)`](#sparse_tensor_to_dense)
+ * [`tf.sparse_to_indicator(sp_input, vocab_size, name=None)`](#sparse_to_indicator)
* [Manipulation](#AUTOGENERATED-manipulation)
- * [tf.sparse_concat(concat_dim, sp_inputs, name=None)](#sparse_concat)
- * [tf.sparse_reorder(sp_input, name=None)](#sparse_reorder)
- * [tf.sparse_retain(sp_input, to_retain)](#sparse_retain)
- * [tf.sparse_fill_empty_rows(sp_input, default_value, name=None)](#sparse_fill_empty_rows)
+ * [`tf.sparse_concat(concat_dim, sp_inputs, name=None)`](#sparse_concat)
+ * [`tf.sparse_reorder(sp_input, name=None)`](#sparse_reorder)
+ * [`tf.sparse_retain(sp_input, to_retain)`](#sparse_retain)
+ * [`tf.sparse_fill_empty_rows(sp_input, default_value, name=None)`](#sparse_fill_empty_rows)
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
@@ -81,7 +81,7 @@ represents the dense tensor
- - -
-#### tf.SparseTensor.__init__(indices, values, shape) <a class="md-anchor" id="SparseTensor.__init__"></a>
+#### `tf.SparseTensor.__init__(indices, values, shape)` <a class="md-anchor" id="SparseTensor.__init__"></a>
Creates a `SparseTensor`.
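A small sketch of building a `SparseTensor` and densifying it; the entries are made up, and the indices/shape are given as `int64` to match the properties described below:

```python
import tensorflow as tf

# Two non-zero entries of a 3x4 dense tensor, listed in row-major order.
indices = tf.constant([[0, 0], [1, 2]], dtype=tf.int64)
shape = tf.constant([3, 4], dtype=tf.int64)
st = tf.SparseTensor(indices=indices, values=[7.0, 9.0], shape=shape)

dense = tf.sparse_tensor_to_dense(st, default_value=0.0)
with tf.Session() as sess:
    print(sess.run(dense))
```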
@@ -99,7 +99,7 @@ Creates a `SparseTensor`.
- - -
-#### tf.SparseTensor.indices <a class="md-anchor" id="SparseTensor.indices"></a>
+#### `tf.SparseTensor.indices` <a class="md-anchor" id="SparseTensor.indices"></a>
The indices of non-zero values in the represented dense tensor.
@@ -110,7 +110,7 @@ The indices of non-zero values in the represented dense tensor.
- - -
-#### tf.SparseTensor.values <a class="md-anchor" id="SparseTensor.values"></a>
+#### `tf.SparseTensor.values` <a class="md-anchor" id="SparseTensor.values"></a>
The non-zero values in the represented dense tensor.
@@ -120,19 +120,19 @@ The non-zero values in the represented dense tensor.
- - -
-#### tf.SparseTensor.dtype <a class="md-anchor" id="SparseTensor.dtype"></a>
+#### `tf.SparseTensor.dtype` <a class="md-anchor" id="SparseTensor.dtype"></a>
The `DType` of elements in this tensor.
- - -
-#### tf.SparseTensor.shape <a class="md-anchor" id="SparseTensor.shape"></a>
+#### `tf.SparseTensor.shape` <a class="md-anchor" id="SparseTensor.shape"></a>
A 1-D Tensor of int64 representing the shape of the dense tensor.
- - -
-#### tf.SparseTensor.graph <a class="md-anchor" id="SparseTensor.graph"></a>
+#### `tf.SparseTensor.graph` <a class="md-anchor" id="SparseTensor.graph"></a>
The `Graph` that contains the index, value, and shape tensors.
@@ -144,19 +144,19 @@ The `Graph` that contains the index, value, and shape tensors.
SparseTensorValue(indices, values, shape)
- - -
-#### tf.SparseTensorValue.indices <a class="md-anchor" id="SparseTensorValue.indices"></a>
+#### `tf.SparseTensorValue.indices` <a class="md-anchor" id="SparseTensorValue.indices"></a>
Alias for field number 0
- - -
-#### tf.SparseTensorValue.shape <a class="md-anchor" id="SparseTensorValue.shape"></a>
+#### `tf.SparseTensorValue.shape` <a class="md-anchor" id="SparseTensorValue.shape"></a>
Alias for field number 2
- - -
-#### tf.SparseTensorValue.values <a class="md-anchor" id="SparseTensorValue.values"></a>
+#### `tf.SparseTensorValue.values` <a class="md-anchor" id="SparseTensorValue.values"></a>
Alias for field number 1
@@ -166,7 +166,7 @@ Alias for field number 1
- - -
-### tf.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value, name=None) <a class="md-anchor" id="sparse_to_dense"></a>
+### `tf.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value, name=None)` <a class="md-anchor" id="sparse_to_dense"></a>
Converts a sparse representation into a dense tensor.
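A quick sketch of `sparse_to_dense` with made-up indices and values:

```python
import tensorflow as tf

# Scatter 7 and 9 into positions (0, 0) and (1, 2) of a 3x4 tensor;
# every other element gets the default value 0.
dense = tf.sparse_to_dense(sparse_indices=[[0, 0], [1, 2]],
                           output_shape=[3, 4],
                           sparse_values=[7, 9],
                           default_value=0)
```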
@@ -210,7 +210,7 @@ scalar, all sparse indices are set to this single value.
- - -
-### tf.sparse_tensor_to_dense(sp_input, default_value, name=None) <a class="md-anchor" id="sparse_tensor_to_dense"></a>
+### `tf.sparse_tensor_to_dense(sp_input, default_value, name=None)` <a class="md-anchor" id="sparse_tensor_to_dense"></a>
Converts a `SparseTensor` into a dense tensor.
@@ -251,7 +251,7 @@ string tensor with values:
- - -
-### tf.sparse_to_indicator(sp_input, vocab_size, name=None) <a class="md-anchor" id="sparse_to_indicator"></a>
+### `tf.sparse_to_indicator(sp_input, vocab_size, name=None)` <a class="md-anchor" id="sparse_to_indicator"></a>
Converts a `SparseTensor` of ids into a dense bool indicator tensor.
@@ -305,7 +305,7 @@ The input `SparseTensor` must be in row-major order.
- - -
-### tf.sparse_concat(concat_dim, sp_inputs, name=None) <a class="md-anchor" id="sparse_concat"></a>
+### `tf.sparse_concat(concat_dim, sp_inputs, name=None)` <a class="md-anchor" id="sparse_concat"></a>
Concatenates a list of `SparseTensor` along the specified dimension.
@@ -370,7 +370,7 @@ Graphically this is equivalent to doing
- - -
-### tf.sparse_reorder(sp_input, name=None) <a class="md-anchor" id="sparse_reorder"></a>
+### `tf.sparse_reorder(sp_input, name=None)` <a class="md-anchor" id="sparse_reorder"></a>
Reorders a `SparseTensor` into the canonical, row-major ordering.
@@ -414,7 +414,7 @@ then the output will be a `SparseTensor` of shape `[4, 5]` and
- - -
-### tf.sparse_retain(sp_input, to_retain) <a class="md-anchor" id="sparse_retain"></a>
+### `tf.sparse_retain(sp_input, to_retain)` <a class="md-anchor" id="sparse_retain"></a>
Retains specified non-empty values within a `SparseTensor`.
@@ -450,7 +450,7 @@ be a `SparseTensor` of shape `[4, 5]` with 2 non-empty values:
- - -
-### tf.sparse_fill_empty_rows(sp_input, default_value, name=None) <a class="md-anchor" id="sparse_fill_empty_rows"></a>
+### `tf.sparse_fill_empty_rows(sp_input, default_value, name=None)` <a class="md-anchor" id="sparse_fill_empty_rows"></a>
Fills empty rows in the input 2-D `SparseTensor` with a default value.
diff --git a/tensorflow/g3doc/api_docs/python/state_ops.md b/tensorflow/g3doc/api_docs/python/state_ops.md
index f18de539cb..5e8e96c000 100644
--- a/tensorflow/g3doc/api_docs/python/state_ops.md
+++ b/tensorflow/g3doc/api_docs/python/state_ops.md
@@ -11,31 +11,31 @@ accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
* [Variables](#AUTOGENERATED-variables)
* [class tf.Variable](#Variable)
* [Variable helper functions](#AUTOGENERATED-variable-helper-functions)
- * [tf.all_variables()](#all_variables)
- * [tf.trainable_variables()](#trainable_variables)
- * [tf.initialize_all_variables()](#initialize_all_variables)
- * [tf.initialize_variables(var_list, name='init')](#initialize_variables)
- * [tf.assert_variables_initialized(var_list=None)](#assert_variables_initialized)
+ * [`tf.all_variables()`](#all_variables)
+ * [`tf.trainable_variables()`](#trainable_variables)
+ * [`tf.initialize_all_variables()`](#initialize_all_variables)
+ * [`tf.initialize_variables(var_list, name='init')`](#initialize_variables)
+ * [`tf.assert_variables_initialized(var_list=None)`](#assert_variables_initialized)
* [Saving and Restoring Variables](#AUTOGENERATED-saving-and-restoring-variables)
* [class tf.train.Saver](#Saver)
- * [tf.train.latest_checkpoint(checkpoint_dir, latest_filename=None)](#latest_checkpoint)
- * [tf.train.get_checkpoint_state(checkpoint_dir, latest_filename=None)](#get_checkpoint_state)
- * [tf.train.update_checkpoint_state(save_dir, model_checkpoint_path, all_model_checkpoint_paths=None, latest_filename=None)](#update_checkpoint_state)
+ * [`tf.train.latest_checkpoint(checkpoint_dir, latest_filename=None)`](#latest_checkpoint)
+ * [`tf.train.get_checkpoint_state(checkpoint_dir, latest_filename=None)`](#get_checkpoint_state)
+ * [`tf.train.update_checkpoint_state(save_dir, model_checkpoint_path, all_model_checkpoint_paths=None, latest_filename=None)`](#update_checkpoint_state)
* [Sharing Variables](#AUTOGENERATED-sharing-variables)
- * [tf.get_variable(name, shape=None, dtype=tf.float32, initializer=None, trainable=True, collections=None)](#get_variable)
- * [tf.get_variable_scope()](#get_variable_scope)
- * [tf.variable_scope(name_or_scope, reuse=None, initializer=None)](#variable_scope)
- * [tf.constant_initializer(value=0.0)](#constant_initializer)
- * [tf.random_normal_initializer(mean=0.0, stddev=1.0, seed=None)](#random_normal_initializer)
- * [tf.truncated_normal_initializer(mean=0.0, stddev=1.0, seed=None)](#truncated_normal_initializer)
- * [tf.random_uniform_initializer(minval=0.0, maxval=1.0, seed=None)](#random_uniform_initializer)
- * [tf.uniform_unit_scaling_initializer(factor=1.0, seed=None)](#uniform_unit_scaling_initializer)
- * [tf.zeros_initializer(shape, dtype=tf.float32)](#zeros_initializer)
+ * [`tf.get_variable(name, shape=None, dtype=tf.float32, initializer=None, trainable=True, collections=None)`](#get_variable)
+ * [`tf.get_variable_scope()`](#get_variable_scope)
+ * [`tf.variable_scope(name_or_scope, reuse=None, initializer=None)`](#variable_scope)
+ * [`tf.constant_initializer(value=0.0)`](#constant_initializer)
+ * [`tf.random_normal_initializer(mean=0.0, stddev=1.0, seed=None)`](#random_normal_initializer)
+ * [`tf.truncated_normal_initializer(mean=0.0, stddev=1.0, seed=None)`](#truncated_normal_initializer)
+ * [`tf.random_uniform_initializer(minval=0.0, maxval=1.0, seed=None)`](#random_uniform_initializer)
+ * [`tf.uniform_unit_scaling_initializer(factor=1.0, seed=None)`](#uniform_unit_scaling_initializer)
+ * [`tf.zeros_initializer(shape, dtype=tf.float32)`](#zeros_initializer)
* [Sparse Variable Updates](#AUTOGENERATED-sparse-variable-updates)
- * [tf.scatter_update(ref, indices, updates, use_locking=None, name=None)](#scatter_update)
- * [tf.scatter_add(ref, indices, updates, use_locking=None, name=None)](#scatter_add)
- * [tf.scatter_sub(ref, indices, updates, use_locking=None, name=None)](#scatter_sub)
- * [tf.sparse_mask(a, mask_indices, name=None)](#sparse_mask)
+ * [`tf.scatter_update(ref, indices, updates, use_locking=None, name=None)`](#scatter_update)
+ * [`tf.scatter_add(ref, indices, updates, use_locking=None, name=None)`](#scatter_add)
+ * [`tf.scatter_sub(ref, indices, updates, use_locking=None, name=None)`](#scatter_sub)
+ * [`tf.sparse_mask(a, mask_indices, name=None)`](#sparse_mask)
* [class tf.IndexedSlices](#IndexedSlices)
@@ -138,7 +138,7 @@ Creating a variable.
- - -
-#### tf.Variable.__init__(initial_value, trainable=True, collections=None, validate_shape=True, name=None) <a class="md-anchor" id="Variable.__init__"></a>
+#### `tf.Variable.__init__(initial_value, trainable=True, collections=None, validate_shape=True, name=None)` <a class="md-anchor" id="Variable.__init__"></a>
Creates a new variable with value `initial_value`.
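A minimal sketch of creating variables, initializing them, and updating one in place; shapes and values are made up:

```python
import tensorflow as tf

weights = tf.Variable(tf.zeros([2, 3]), name='weights')  # assumed toy shape
counter = tf.Variable(0, name='counter')

init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)                   # run the initializer before reading the variables
    sess.run(counter.assign_add(1))  # in-place update, as documented below
    print(sess.run([weights, counter]))
```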
@@ -181,7 +181,7 @@ variable to its initial value.
- - -
-#### tf.Variable.initialized_value() <a class="md-anchor" id="Variable.initialized_value"></a>
+#### `tf.Variable.initialized_value()` <a class="md-anchor" id="Variable.initialized_value"></a>
Returns the value of the initialized variable.
@@ -208,7 +208,7 @@ Changing a variable value.
- - -
-#### tf.Variable.assign(value, use_locking=False) <a class="md-anchor" id="Variable.assign"></a>
+#### `tf.Variable.assign(value, use_locking=False)` <a class="md-anchor" id="Variable.assign"></a>
Assigns a new value to the variable.
@@ -228,7 +228,7 @@ This is essentially a shortcut for `assign(self, value)`.
- - -
-#### tf.Variable.assign_add(delta, use_locking=False) <a class="md-anchor" id="Variable.assign_add"></a>
+#### `tf.Variable.assign_add(delta, use_locking=False)` <a class="md-anchor" id="Variable.assign_add"></a>
Adds a value to this variable.
@@ -248,7 +248,7 @@ Adds a value to this variable.
- - -
-#### tf.Variable.assign_sub(delta, use_locking=False) <a class="md-anchor" id="Variable.assign_sub"></a>
+#### `tf.Variable.assign_sub(delta, use_locking=False)` <a class="md-anchor" id="Variable.assign_sub"></a>
Subtracts a value from this variable.
@@ -268,7 +268,7 @@ This is essentially a shortcut for `assign_sub(self, delta)`.
- - -
-#### tf.Variable.scatter_sub(sparse_delta, use_locking=False) <a class="md-anchor" id="Variable.scatter_sub"></a>
+#### `tf.Variable.scatter_sub(sparse_delta, use_locking=False)` <a class="md-anchor" id="Variable.scatter_sub"></a>
Subtracts `IndexedSlices` from this variable.
@@ -294,7 +294,7 @@ sparse_delta.values)`.
- - -
-#### tf.Variable.count_up_to(limit) <a class="md-anchor" id="Variable.count_up_to"></a>
+#### `tf.Variable.count_up_to(limit)` <a class="md-anchor" id="Variable.count_up_to"></a>
Increments this variable until it reaches `limit`.
@@ -322,7 +322,7 @@ This is essentially a shortcut for `count_up_to(self, limit)`.
- - -
-#### tf.Variable.eval(session=None) <a class="md-anchor" id="Variable.eval"></a>
+#### `tf.Variable.eval(session=None)` <a class="md-anchor" id="Variable.eval"></a>
In a session, computes and returns the value of this variable.
@@ -362,19 +362,19 @@ Properties.
- - -
-#### tf.Variable.name <a class="md-anchor" id="Variable.name"></a>
+#### `tf.Variable.name` <a class="md-anchor" id="Variable.name"></a>
The name of this variable.
- - -
-#### tf.Variable.dtype <a class="md-anchor" id="Variable.dtype"></a>
+#### `tf.Variable.dtype` <a class="md-anchor" id="Variable.dtype"></a>
The `DType` of this variable.
- - -
-#### tf.Variable.get_shape() <a class="md-anchor" id="Variable.get_shape"></a>
+#### `tf.Variable.get_shape()` <a class="md-anchor" id="Variable.get_shape"></a>
The `TensorShape` of this variable.
@@ -385,25 +385,25 @@ The `TensorShape` of this variable.
- - -
-#### tf.Variable.device <a class="md-anchor" id="Variable.device"></a>
+#### `tf.Variable.device` <a class="md-anchor" id="Variable.device"></a>
The device of this variable.
- - -
-#### tf.Variable.initializer <a class="md-anchor" id="Variable.initializer"></a>
+#### `tf.Variable.initializer` <a class="md-anchor" id="Variable.initializer"></a>
The initializer operation for this variable.
- - -
-#### tf.Variable.graph <a class="md-anchor" id="Variable.graph"></a>
+#### `tf.Variable.graph` <a class="md-anchor" id="Variable.graph"></a>
The `Graph` of this variable.
- - -
-#### tf.Variable.op <a class="md-anchor" id="Variable.op"></a>
+#### `tf.Variable.op` <a class="md-anchor" id="Variable.op"></a>
The `Operation` of this variable.
@@ -416,7 +416,7 @@ collected in the graph.
- - -
-### tf.all_variables() <a class="md-anchor" id="all_variables"></a>
+### `tf.all_variables()` <a class="md-anchor" id="all_variables"></a>
Returns all variables collected in the graph.
@@ -431,7 +431,7 @@ contents of that collection.
- - -
-### tf.trainable_variables() <a class="md-anchor" id="trainable_variables"></a>
+### `tf.trainable_variables()` <a class="md-anchor" id="trainable_variables"></a>
Returns all variables created with `trainable=True`.
@@ -448,7 +448,7 @@ contents of that collection.
- - -
-### tf.initialize_all_variables() <a class="md-anchor" id="initialize_all_variables"></a>
+### `tf.initialize_all_variables()` <a class="md-anchor" id="initialize_all_variables"></a>
Returns an Op that initializes all variables.
@@ -461,7 +461,7 @@ This is just a shortcut for `initialize_variables(all_variables())`
- - -
-### tf.initialize_variables(var_list, name='init') <a class="md-anchor" id="initialize_variables"></a>
+### `tf.initialize_variables(var_list, name='init')` <a class="md-anchor" id="initialize_variables"></a>
Returns an Op that initializes a list of variables.
@@ -488,7 +488,7 @@ be run. That Op just has no effect.
- - -
-### tf.assert_variables_initialized(var_list=None) <a class="md-anchor" id="assert_variables_initialized"></a>
+### `tf.assert_variables_initialized(var_list=None)` <a class="md-anchor" id="assert_variables_initialized"></a>
Returns an Op to check if variables are initialized.
@@ -591,7 +591,7 @@ protocol buffer file in the call to `save()`.
- - -
-#### tf.train.Saver.__init__(var_list=None, reshape=False, sharded=False, max_to_keep=5, keep_checkpoint_every_n_hours=10000.0, name=None, restore_sequentially=False, saver_def=None, builder=None) <a class="md-anchor" id="Saver.__init__"></a>
+#### `tf.train.Saver.__init__(var_list=None, reshape=False, sharded=False, max_to_keep=5, keep_checkpoint_every_n_hours=10000.0, name=None, restore_sequentially=False, saver_def=None, builder=None)` <a class="md-anchor" id="Saver.__init__"></a>
Creates a `Saver`.
@@ -662,7 +662,7 @@ checkpoints per device.
- - -
-#### tf.train.Saver.save(sess, save_path, global_step=None, latest_filename=None) <a class="md-anchor" id="Saver.save"></a>
+#### `tf.train.Saver.save(sess, save_path, global_step=None, latest_filename=None)` <a class="md-anchor" id="Saver.save"></a>
Saves variables.
@@ -702,7 +702,7 @@ path can be passed directly to a call to `restore()`.
- - -
-#### tf.train.Saver.restore(sess, save_path) <a class="md-anchor" id="Saver.restore"></a>
+#### `tf.train.Saver.restore(sess, save_path)` <a class="md-anchor" id="Saver.restore"></a>
Restores previously saved variables.
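A short save/restore sketch against the `Saver` methods above; the variable and checkpoint path are assumptions:

```python
import tensorflow as tf

v = tf.Variable(tf.zeros([3]), name='v')
saver = tf.train.Saver()  # saves all variables by default

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    save_path = saver.save(sess, '/tmp/model.ckpt')  # assumed scratch path

# Later, in a fresh session over the same graph:
with tf.Session() as sess:
    saver.restore(sess, save_path)  # no initializer needed; restore assigns the values
    print(sess.run(v))
```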
@@ -726,7 +726,7 @@ Other utility methods.
- - -
-#### tf.train.Saver.last_checkpoints <a class="md-anchor" id="Saver.last_checkpoints"></a>
+#### `tf.train.Saver.last_checkpoints` <a class="md-anchor" id="Saver.last_checkpoints"></a>
List of not-yet-deleted checkpoint filenames.
@@ -738,7 +738,7 @@ You can pass any of the returned values to `restore()`.
- - -
-#### tf.train.Saver.set_last_checkpoints(last_checkpoints) <a class="md-anchor" id="Saver.set_last_checkpoints"></a>
+#### `tf.train.Saver.set_last_checkpoints(last_checkpoints)` <a class="md-anchor" id="Saver.set_last_checkpoints"></a>
Sets the list of not-yet-deleted checkpoint filenames.
@@ -755,7 +755,7 @@ Sets the list of not-yet-deleted checkpoint filenames.
- - -
-#### tf.train.Saver.as_saver_def() <a class="md-anchor" id="Saver.as_saver_def"></a>
+#### `tf.train.Saver.as_saver_def()` <a class="md-anchor" id="Saver.as_saver_def"></a>
Generates a `SaverDef` representation of this saver.
@@ -768,7 +768,7 @@ Generates a `SaverDef` representation of this saver.
- - -
-### tf.train.latest_checkpoint(checkpoint_dir, latest_filename=None) <a class="md-anchor" id="latest_checkpoint"></a>
+### `tf.train.latest_checkpoint(checkpoint_dir, latest_filename=None)` <a class="md-anchor" id="latest_checkpoint"></a>
Finds the filename of latest saved checkpoint file.
Finds the filename of the latest saved checkpoint file.
- - -
-### tf.train.get_checkpoint_state(checkpoint_dir, latest_filename=None) <a class="md-anchor" id="get_checkpoint_state"></a>
+### `tf.train.get_checkpoint_state(checkpoint_dir, latest_filename=None)` <a class="md-anchor" id="get_checkpoint_state"></a>
Returns CheckpointState proto from the "checkpoint" file.
@@ -810,7 +810,7 @@ proto, returns it.
- - -
-### tf.train.update_checkpoint_state(save_dir, model_checkpoint_path, all_model_checkpoint_paths=None, latest_filename=None) <a class="md-anchor" id="update_checkpoint_state"></a>
+### `tf.train.update_checkpoint_state(save_dir, model_checkpoint_path, all_model_checkpoint_paths=None, latest_filename=None)` <a class="md-anchor" id="update_checkpoint_state"></a>
Updates the content of the 'checkpoint' file.
@@ -843,7 +843,7 @@ create variables contingent on certain conditions.
- - -
-### tf.get_variable(name, shape=None, dtype=tf.float32, initializer=None, trainable=True, collections=None) <a class="md-anchor" id="get_variable"></a>
+### `tf.get_variable(name, shape=None, dtype=tf.float32, initializer=None, trainable=True, collections=None)` <a class="md-anchor" id="get_variable"></a>
Gets an existing variable with these parameters or creates a new one.
@@ -890,14 +890,14 @@ the constructor is used. If that one is `None` too, a
- - -
-### tf.get_variable_scope() <a class="md-anchor" id="get_variable_scope"></a>
+### `tf.get_variable_scope()` <a class="md-anchor" id="get_variable_scope"></a>
Returns the current variable scope.
- - -
-### tf.variable_scope(name_or_scope, reuse=None, initializer=None) <a class="md-anchor" id="variable_scope"></a>
+### `tf.variable_scope(name_or_scope, reuse=None, initializer=None)` <a class="md-anchor" id="variable_scope"></a>
Returns a context for variable scope.
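A minimal sharing sketch using the `variable_scope` and `get_variable` signatures above; the scope and variable names are made up:

```python
import tensorflow as tf

with tf.variable_scope('layer1', initializer=tf.random_normal_initializer()):
    w = tf.get_variable('w', shape=[4, 4])       # created as "layer1/w"

with tf.variable_scope('layer1', reuse=True):
    w_again = tf.get_variable('w', shape=[4, 4])  # returns the existing "layer1/w"
```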
@@ -980,7 +980,7 @@ then all its sub-scopes become reusing as well.
- - -
-### tf.constant_initializer(value=0.0) <a class="md-anchor" id="constant_initializer"></a>
+### `tf.constant_initializer(value=0.0)` <a class="md-anchor" id="constant_initializer"></a>
Returns an initializer that generates Tensors with a single value.
@@ -997,7 +997,7 @@ Returns an initializer that generates Tensors with a single value.
- - -
-### tf.random_normal_initializer(mean=0.0, stddev=1.0, seed=None) <a class="md-anchor" id="random_normal_initializer"></a>
+### `tf.random_normal_initializer(mean=0.0, stddev=1.0, seed=None)` <a class="md-anchor" id="random_normal_initializer"></a>
Returns an initializer that generates Tensors with a normal distribution.
@@ -1018,7 +1018,7 @@ Returns an initializer that generates Tensors with a normal distribution.
- - -
-### tf.truncated_normal_initializer(mean=0.0, stddev=1.0, seed=None) <a class="md-anchor" id="truncated_normal_initializer"></a>
+### `tf.truncated_normal_initializer(mean=0.0, stddev=1.0, seed=None)` <a class="md-anchor" id="truncated_normal_initializer"></a>
Returns an initializer that generates a truncated normal distribution.
@@ -1045,7 +1045,7 @@ neural network weights and filters.
- - -
-### tf.random_uniform_initializer(minval=0.0, maxval=1.0, seed=None) <a class="md-anchor" id="random_uniform_initializer"></a>
+### `tf.random_uniform_initializer(minval=0.0, maxval=1.0, seed=None)` <a class="md-anchor" id="random_uniform_initializer"></a>
Returns an initializer that generates Tensors with a uniform distribution.
@@ -1066,7 +1066,7 @@ Returns an initializer that generates Tensors with a uniform distribution.
- - -
-### tf.uniform_unit_scaling_initializer(factor=1.0, seed=None) <a class="md-anchor" id="uniform_unit_scaling_initializer"></a>
+### `tf.uniform_unit_scaling_initializer(factor=1.0, seed=None)` <a class="md-anchor" id="uniform_unit_scaling_initializer"></a>
Returns an initializer that generates tensors without scaling variance.
@@ -1099,7 +1099,7 @@ numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15.
- - -
-### tf.zeros_initializer(shape, dtype=tf.float32) <a class="md-anchor" id="zeros_initializer"></a>
+### `tf.zeros_initializer(shape, dtype=tf.float32)` <a class="md-anchor" id="zeros_initializer"></a>
An adaptor for zeros() to match the Initializer spec.
@@ -1120,7 +1120,7 @@ automatically by the optimizers in most cases.
- - -
-### tf.scatter_update(ref, indices, updates, use_locking=None, name=None) <a class="md-anchor" id="scatter_update"></a>
+### `tf.scatter_update(ref, indices, updates, use_locking=None, name=None)` <a class="md-anchor" id="scatter_update"></a>
Applies sparse updates to a variable reference.
@@ -1168,7 +1168,7 @@ Requires `updates.shape = indices.shape + ref.shape[1:]`.
- - -
-### tf.scatter_add(ref, indices, updates, use_locking=None, name=None) <a class="md-anchor" id="scatter_add"></a>
+### `tf.scatter_add(ref, indices, updates, use_locking=None, name=None)` <a class="md-anchor" id="scatter_add"></a>
Adds sparse updates to a variable reference.
@@ -1217,7 +1217,7 @@ Requires `updates.shape = indices.shape + ref.shape[1:]`.
- - -
-### tf.scatter_sub(ref, indices, updates, use_locking=None, name=None) <a class="md-anchor" id="scatter_sub"></a>
+### `tf.scatter_sub(ref, indices, updates, use_locking=None, name=None)` <a class="md-anchor" id="scatter_sub"></a>
Subtracts sparse updates from a variable reference.
@@ -1264,7 +1264,7 @@ Requires `updates.shape = indices.shape + ref.shape[1:]`.
- - -
-### tf.sparse_mask(a, mask_indices, name=None) <a class="md-anchor" id="sparse_mask"></a>
+### `tf.sparse_mask(a, mask_indices, name=None)` <a class="md-anchor" id="sparse_mask"></a>
Masks elements of `IndexedSlices`.
@@ -1336,7 +1336,7 @@ which uses multi-dimensional indices and scalar values.
- - -
-#### tf.IndexedSlices.__init__(values, indices, dense_shape=None) <a class="md-anchor" id="IndexedSlices.__init__"></a>
+#### `tf.IndexedSlices.__init__(values, indices, dense_shape=None)` <a class="md-anchor" id="IndexedSlices.__init__"></a>
Creates an `IndexedSlices`.
@@ -1344,44 +1344,44 @@ Creates an `IndexedSlices`.
- - -
-#### tf.IndexedSlices.values <a class="md-anchor" id="IndexedSlices.values"></a>
+#### `tf.IndexedSlices.values` <a class="md-anchor" id="IndexedSlices.values"></a>
A `Tensor` containing the values of the slices.
- - -
-#### tf.IndexedSlices.indices <a class="md-anchor" id="IndexedSlices.indices"></a>
+#### `tf.IndexedSlices.indices` <a class="md-anchor" id="IndexedSlices.indices"></a>
A 1-D `Tensor` containing the indices of the slices.
- - -
-#### tf.IndexedSlices.dense_shape <a class="md-anchor" id="IndexedSlices.dense_shape"></a>
+#### `tf.IndexedSlices.dense_shape` <a class="md-anchor" id="IndexedSlices.dense_shape"></a>
A 1-D `Tensor` containing the shape of the corresponding dense tensor.
- - -
-#### tf.IndexedSlices.name <a class="md-anchor" id="IndexedSlices.name"></a>
+#### `tf.IndexedSlices.name` <a class="md-anchor" id="IndexedSlices.name"></a>
The name of this `IndexedSlices`.
- - -
-#### tf.IndexedSlices.dtype <a class="md-anchor" id="IndexedSlices.dtype"></a>
+#### `tf.IndexedSlices.dtype` <a class="md-anchor" id="IndexedSlices.dtype"></a>
The `DType` of elements in this tensor.
- - -
-#### tf.IndexedSlices.device <a class="md-anchor" id="IndexedSlices.device"></a>
+#### `tf.IndexedSlices.device` <a class="md-anchor" id="IndexedSlices.device"></a>
The name of the device on which `values` will be produced, or `None`.
- - -
-#### tf.IndexedSlices.op <a class="md-anchor" id="IndexedSlices.op"></a>
+#### `tf.IndexedSlices.op` <a class="md-anchor" id="IndexedSlices.op"></a>
The `Operation` that produces `values` as an output.
diff --git a/tensorflow/g3doc/api_docs/python/train.md b/tensorflow/g3doc/api_docs/python/train.md
index 3327aa20b4..c0199a020e 100644
--- a/tensorflow/g3doc/api_docs/python/train.md
+++ b/tensorflow/g3doc/api_docs/python/train.md
@@ -17,37 +17,37 @@
* [class tf.train.FtrlOptimizer](#FtrlOptimizer)
* [class tf.train.RMSPropOptimizer](#RMSPropOptimizer)
* [Gradient Computation](#AUTOGENERATED-gradient-computation)
- * [tf.gradients(ys, xs, grad_ys=None, name='gradients', colocate_gradients_with_ops=False, gate_gradients=False, aggregation_method=None)](#gradients)
+ * [`tf.gradients(ys, xs, grad_ys=None, name='gradients', colocate_gradients_with_ops=False, gate_gradients=False, aggregation_method=None)`](#gradients)
* [class tf.AggregationMethod](#AggregationMethod)
- * [tf.stop_gradient(input, name=None)](#stop_gradient)
+ * [`tf.stop_gradient(input, name=None)`](#stop_gradient)
* [Gradient Clipping](#AUTOGENERATED-gradient-clipping)
- * [tf.clip_by_value(t, clip_value_min, clip_value_max, name=None)](#clip_by_value)
- * [tf.clip_by_norm(t, clip_norm, name=None)](#clip_by_norm)
- * [tf.clip_by_average_norm(t, clip_norm, name=None)](#clip_by_average_norm)
- * [tf.clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None)](#clip_by_global_norm)
- * [tf.global_norm(t_list, name=None)](#global_norm)
+ * [`tf.clip_by_value(t, clip_value_min, clip_value_max, name=None)`](#clip_by_value)
+ * [`tf.clip_by_norm(t, clip_norm, name=None)`](#clip_by_norm)
+ * [`tf.clip_by_average_norm(t, clip_norm, name=None)`](#clip_by_average_norm)
+ * [`tf.clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None)`](#clip_by_global_norm)
+ * [`tf.global_norm(t_list, name=None)`](#global_norm)
* [Decaying the learning rate](#AUTOGENERATED-decaying-the-learning-rate)
- * [tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=False, name=None)](#exponential_decay)
+ * [`tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=False, name=None)`](#exponential_decay)
* [Moving Averages](#AUTOGENERATED-moving-averages)
* [class tf.train.ExponentialMovingAverage](#ExponentialMovingAverage)
* [Coordinator and QueueRunner](#AUTOGENERATED-coordinator-and-queuerunner)
* [class tf.train.Coordinator](#Coordinator)
* [class tf.train.QueueRunner](#QueueRunner)
- * [tf.train.add_queue_runner(qr, collection='queue_runners')](#add_queue_runner)
- * [tf.train.start_queue_runners(sess=None, coord=None, daemon=True, start=True, collection='queue_runners')](#start_queue_runners)
+ * [`tf.train.add_queue_runner(qr, collection='queue_runners')`](#add_queue_runner)
+ * [`tf.train.start_queue_runners(sess=None, coord=None, daemon=True, start=True, collection='queue_runners')`](#start_queue_runners)
* [Summary Operations](#AUTOGENERATED-summary-operations)
- * [tf.scalar_summary(tags, values, collections=None, name=None)](#scalar_summary)
- * [tf.image_summary(tag, tensor, max_images=None, collections=None, name=None)](#image_summary)
- * [tf.histogram_summary(tag, values, collections=None, name=None)](#histogram_summary)
- * [tf.nn.zero_fraction(value, name=None)](#zero_fraction)
- * [tf.merge_summary(inputs, collections=None, name=None)](#merge_summary)
- * [tf.merge_all_summaries(key='summaries')](#merge_all_summaries)
+ * [`tf.scalar_summary(tags, values, collections=None, name=None)`](#scalar_summary)
+ * [`tf.image_summary(tag, tensor, max_images=None, collections=None, name=None)`](#image_summary)
+ * [`tf.histogram_summary(tag, values, collections=None, name=None)`](#histogram_summary)
+ * [`tf.nn.zero_fraction(value, name=None)`](#zero_fraction)
+ * [`tf.merge_summary(inputs, collections=None, name=None)`](#merge_summary)
+ * [`tf.merge_all_summaries(key='summaries')`](#merge_all_summaries)
* [Adding Summaries to Event Files](#AUTOGENERATED-adding-summaries-to-event-files)
* [class tf.train.SummaryWriter](#SummaryWriter)
- * [tf.train.summary_iterator(path)](#summary_iterator)
+ * [`tf.train.summary_iterator(path)`](#summary_iterator)
* [Training utilities](#AUTOGENERATED-training-utilities)
- * [tf.train.global_step(sess, global_step_tensor)](#global_step)
- * [tf.train.write_graph(graph_def, logdir, name, as_text=True)](#write_graph)
+ * [`tf.train.global_step(sess, global_step_tensor)`](#global_step)
+ * [`tf.train.write_graph(graph_def, logdir, name, as_text=True)`](#write_graph)
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
@@ -120,7 +120,7 @@ opt.apply_gradients(capped_grads_and_vars)
- - -
-#### tf.train.Optimizer.__init__(use_locking, name) <a class="md-anchor" id="Optimizer.__init__"></a>
+#### `tf.train.Optimizer.__init__(use_locking, name)` <a class="md-anchor" id="Optimizer.__init__"></a>
Create a new Optimizer.
@@ -143,7 +143,7 @@ This must be called by the constructors of subclasses.
- - -
-#### tf.train.Optimizer.minimize(loss, global_step=None, var_list=None, gate_gradients=1, name=None) <a class="md-anchor" id="Optimizer.minimize"></a>
+#### `tf.train.Optimizer.minimize(loss, global_step=None, var_list=None, gate_gradients=1, name=None)` <a class="md-anchor" id="Optimizer.minimize"></a>
Add operations to minimize 'loss' by updating 'var_list'.
@@ -178,7 +178,7 @@ this function.
- - -
-#### tf.train.Optimizer.compute_gradients(loss, var_list=None, gate_gradients=1) <a class="md-anchor" id="Optimizer.compute_gradients"></a>
+#### `tf.train.Optimizer.compute_gradients(loss, var_list=None, gate_gradients=1)` <a class="md-anchor" id="Optimizer.compute_gradients"></a>
Compute gradients of "loss" for the variables in "var_list".
@@ -211,7 +211,7 @@ given variable.
- - -
-#### tf.train.Optimizer.apply_gradients(grads_and_vars, global_step=None, name=None) <a class="md-anchor" id="Optimizer.apply_gradients"></a>
+#### `tf.train.Optimizer.apply_gradients(grads_and_vars, global_step=None, name=None)` <a class="md-anchor" id="Optimizer.apply_gradients"></a>
Apply gradients to variables.
@@ -276,7 +276,7 @@ about the slots, etc.
- - -
-#### tf.train.Optimizer.get_slot_names() <a class="md-anchor" id="Optimizer.get_slot_names"></a>
+#### `tf.train.Optimizer.get_slot_names()` <a class="md-anchor" id="Optimizer.get_slot_names"></a>
Return a list of the names of slots created by the Optimizer.
@@ -289,7 +289,7 @@ See get_slot().
- - -
-#### tf.train.Optimizer.get_slot(var, name) <a class="md-anchor" id="Optimizer.get_slot"></a>
+#### `tf.train.Optimizer.get_slot(var, name)` <a class="md-anchor" id="Optimizer.get_slot"></a>
Return a slot named "name" created for "var" by the Optimizer.
@@ -320,7 +320,7 @@ Optimizer that implements the gradient descent algorithm.
- - -
-#### tf.train.GradientDescentOptimizer.__init__(learning_rate, use_locking=False, name='GradientDescent') <a class="md-anchor" id="GradientDescentOptimizer.__init__"></a>
+#### `tf.train.GradientDescentOptimizer.__init__(learning_rate, use_locking=False, name='GradientDescent')` <a class="md-anchor" id="GradientDescentOptimizer.__init__"></a>
Construct a new gradient descent optimizer.
@@ -343,7 +343,7 @@ Optimizer that implements the Adagrad algorithm.
- - -
-#### tf.train.AdagradOptimizer.__init__(learning_rate, initial_accumulator_value=0.1, use_locking=False, name='Adagrad') <a class="md-anchor" id="AdagradOptimizer.__init__"></a>
+#### `tf.train.AdagradOptimizer.__init__(learning_rate, initial_accumulator_value=0.1, use_locking=False, name='Adagrad')` <a class="md-anchor" id="AdagradOptimizer.__init__"></a>
Construct a new Adagrad optimizer.
@@ -372,7 +372,7 @@ Optimizer that implements the Momentum algorithm.
- - -
-#### tf.train.MomentumOptimizer.__init__(learning_rate, momentum, use_locking=False, name='Momentum') <a class="md-anchor" id="MomentumOptimizer.__init__"></a>
+#### `tf.train.MomentumOptimizer.__init__(learning_rate, momentum, use_locking=False, name='Momentum')` <a class="md-anchor" id="MomentumOptimizer.__init__"></a>
Construct a new Momentum optimizer.
@@ -395,7 +395,7 @@ Optimizer that implements the Adam algorithm.
- - -
-#### tf.train.AdamOptimizer.__init__(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False, name='Adam') <a class="md-anchor" id="AdamOptimizer.__init__"></a>
+#### `tf.train.AdamOptimizer.__init__(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False, name='Adam')` <a class="md-anchor" id="AdamOptimizer.__init__"></a>
Construct a new Adam optimizer.
@@ -448,7 +448,7 @@ Optimizer that implements the FTRL algorithm.
- - -
-#### tf.train.FtrlOptimizer.__init__(learning_rate, learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0, use_locking=False, name='Ftrl') <a class="md-anchor" id="FtrlOptimizer.__init__"></a>
+#### `tf.train.FtrlOptimizer.__init__(learning_rate, learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0, use_locking=False, name='Ftrl')` <a class="md-anchor" id="FtrlOptimizer.__init__"></a>
Construct a new FTRL optimizer.
@@ -506,7 +506,7 @@ Optimizer that implements the RMSProp algorithm.
- - -
-#### tf.train.RMSPropOptimizer.__init__(learning_rate, decay, momentum=0.0, epsilon=1e-10, use_locking=False, name='RMSProp') <a class="md-anchor" id="RMSPropOptimizer.__init__"></a>
+#### `tf.train.RMSPropOptimizer.__init__(learning_rate, decay, momentum=0.0, epsilon=1e-10, use_locking=False, name='RMSProp')` <a class="md-anchor" id="RMSPropOptimizer.__init__"></a>
Construct a new RMSProp optimizer.
@@ -534,7 +534,7 @@ functions below.
- - -
-### tf.gradients(ys, xs, grad_ys=None, name='gradients', colocate_gradients_with_ops=False, gate_gradients=False, aggregation_method=None) <a class="md-anchor" id="gradients"></a>
+### `tf.gradients(ys, xs, grad_ys=None, name='gradients', colocate_gradients_with_ops=False, gate_gradients=False, aggregation_method=None)` <a class="md-anchor" id="gradients"></a>
Constructs symbolic partial derivatives of `ys` w.r.t. x in `xs`.
@@ -601,7 +601,7 @@ be used to combine gradients in the graph:
- - -
-### tf.stop_gradient(input, name=None) <a class="md-anchor" id="stop_gradient"></a>
+### `tf.stop_gradient(input, name=None)` <a class="md-anchor" id="stop_gradient"></a>
Stops gradient computation.
@@ -647,7 +647,7 @@ gradients.
- - -
-### tf.clip_by_value(t, clip_value_min, clip_value_max, name=None) <a class="md-anchor" id="clip_by_value"></a>
+### `tf.clip_by_value(t, clip_value_min, clip_value_max, name=None)` <a class="md-anchor" id="clip_by_value"></a>
Clips tensor values to a specified min and max.
@@ -671,7 +671,7 @@ greater than `clip_value_max` are set to `clip_value_max`.
- - -
-### tf.clip_by_norm(t, clip_norm, name=None) <a class="md-anchor" id="clip_by_norm"></a>
+### `tf.clip_by_norm(t, clip_norm, name=None)` <a class="md-anchor" id="clip_by_norm"></a>
Clips tensor values to a maximum L2-norm.
@@ -703,7 +703,7 @@ an optimizer.
- - -
-### tf.clip_by_average_norm(t, clip_norm, name=None) <a class="md-anchor" id="clip_by_average_norm"></a>
+### `tf.clip_by_average_norm(t, clip_norm, name=None)` <a class="md-anchor" id="clip_by_average_norm"></a>
Clips tensor values to a maximum average L2-norm.
@@ -735,7 +735,7 @@ an optimizer.
- - -
-### tf.clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None) <a class="md-anchor" id="clip_by_global_norm"></a>
+### `tf.clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None)` <a class="md-anchor" id="clip_by_global_norm"></a>
Clips values of multiple tensors by the ratio of the sum of their norms.
@@ -788,7 +788,7 @@ ready before the clipping operation can be performed.
- - -
-### tf.global_norm(t_list, name=None) <a class="md-anchor" id="global_norm"></a>
+### `tf.global_norm(t_list, name=None)` <a class="md-anchor" id="global_norm"></a>
Computes the global norm of multiple tensors.
@@ -820,7 +820,7 @@ Any entries in `t_list` that are of type None are ignored.
## Decaying the learning rate <a class="md-anchor" id="AUTOGENERATED-decaying-the-learning-rate"></a>
- - -
-### tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=False, name=None) <a class="md-anchor" id="exponential_decay"></a>
+### `tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=False, name=None)` <a class="md-anchor" id="exponential_decay"></a>
Applies exponential decay to the learning rate.
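A short sketch wiring `exponential_decay` into an optimizer; the rates and step counts are made up, and `loss` is assumed to be defined elsewhere:

```python
import tensorflow as tf

global_step = tf.Variable(0, trainable=False)
# Start at 0.1 and multiply by 0.96 every 1000 steps (staircase=True keeps it piecewise constant).
learning_rate = tf.train.exponential_decay(0.1, global_step,
                                           decay_steps=1000, decay_rate=0.96,
                                           staircase=True)
opt = tf.train.GradientDescentOptimizer(learning_rate)
# Passing global_step to minimize() increments it on every training step:
# train_op = opt.minimize(loss, global_step=global_step)
```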
@@ -966,7 +966,7 @@ saver.restore(...checkpoint filename...)
- - -
-#### tf.train.ExponentialMovingAverage.__init__(decay, num_updates=None, name='ExponentialMovingAverage') <a class="md-anchor" id="ExponentialMovingAverage.__init__"></a>
+#### `tf.train.ExponentialMovingAverage.__init__(decay, num_updates=None, name='ExponentialMovingAverage')` <a class="md-anchor" id="ExponentialMovingAverage.__init__"></a>
Creates a new ExponentialMovingAverage object.
@@ -992,7 +992,7 @@ move faster. If passed, the actual decay rate used is:
- - -
-#### tf.train.ExponentialMovingAverage.apply(var_list=None) <a class="md-anchor" id="ExponentialMovingAverage.apply"></a>
+#### `tf.train.ExponentialMovingAverage.apply(var_list=None)` <a class="md-anchor" id="ExponentialMovingAverage.apply"></a>
Maintains moving averages of variables.
@@ -1030,7 +1030,7 @@ variables.
- - -
-#### tf.train.ExponentialMovingAverage.average_name(var) <a class="md-anchor" id="ExponentialMovingAverage.average_name"></a>
+#### `tf.train.ExponentialMovingAverage.average_name(var)` <a class="md-anchor" id="ExponentialMovingAverage.average_name"></a>
Returns the name of the `Variable` holding the average for `var`.
@@ -1059,7 +1059,7 @@ to restore the variable from the moving average value with:
- - -
-#### tf.train.ExponentialMovingAverage.average(var) <a class="md-anchor" id="ExponentialMovingAverage.average"></a>
+#### `tf.train.ExponentialMovingAverage.average(var)` <a class="md-anchor" id="ExponentialMovingAverage.average"></a>
Returns the `Variable` holding the average of `var`.
@@ -1170,14 +1170,14 @@ except Exception:
```
- - -
-#### tf.train.Coordinator.__init__() <a class="md-anchor" id="Coordinator.__init__"></a>
+#### `tf.train.Coordinator.__init__()` <a class="md-anchor" id="Coordinator.__init__"></a>
Create a new Coordinator.
- - -
-#### tf.train.Coordinator.join(threads, stop_grace_period_secs=120) <a class="md-anchor" id="Coordinator.join"></a>
+#### `tf.train.Coordinator.join(threads, stop_grace_period_secs=120)` <a class="md-anchor" id="Coordinator.join"></a>
Wait for threads to terminate.
@@ -1208,7 +1208,7 @@ that RuntimeError.
- - -
-#### tf.train.Coordinator.request_stop(ex=None) <a class="md-anchor" id="Coordinator.request_stop"></a>
+#### `tf.train.Coordinator.request_stop(ex=None)` <a class="md-anchor" id="Coordinator.request_stop"></a>
Request that the threads stop.
@@ -1224,7 +1224,7 @@ After this is called, calls to should_stop() will return True.
- - -
-#### tf.train.Coordinator.should_stop() <a class="md-anchor" id="Coordinator.should_stop"></a>
+#### `tf.train.Coordinator.should_stop()` <a class="md-anchor" id="Coordinator.should_stop"></a>
Check if stop was requested.
@@ -1235,7 +1235,7 @@ Check if stop was requested.
- - -
-#### tf.train.Coordinator.wait_for_stop(timeout=None) <a class="md-anchor" id="Coordinator.wait_for_stop"></a>
+#### `tf.train.Coordinator.wait_for_stop(timeout=None)` <a class="md-anchor" id="Coordinator.wait_for_stop"></a>
Wait till the Coordinator is told to stop.
@@ -1271,7 +1271,7 @@ and reporting exceptions, etc.
The `QueueRunner`, combined with the `Coordinator`, helps handle these issues.
- - -
-#### tf.train.QueueRunner.__init__(queue, enqueue_ops) <a class="md-anchor" id="QueueRunner.__init__"></a>
+#### `tf.train.QueueRunner.__init__(queue, enqueue_ops)` <a class="md-anchor" id="QueueRunner.__init__"></a>
Create a QueueRunner.
@@ -1293,7 +1293,7 @@ to all be the same op, but it is expected that they all enqueue tensors in
- - -
-#### tf.train.QueueRunner.create_threads(sess, coord=None, daemon=False, start=False) <a class="md-anchor" id="QueueRunner.create_threads"></a>
+#### `tf.train.QueueRunner.create_threads(sess, coord=None, daemon=False, start=False)` <a class="md-anchor" id="QueueRunner.create_threads"></a>
Create threads to run the enqueue ops.
@@ -1332,7 +1332,7 @@ have stopped.
- - -
-#### tf.train.QueueRunner.exceptions_raised <a class="md-anchor" id="QueueRunner.exceptions_raised"></a>
+#### `tf.train.QueueRunner.exceptions_raised` <a class="md-anchor" id="QueueRunner.exceptions_raised"></a>
Exceptions raised but not handled by the `QueueRunner` threads.
@@ -1353,7 +1353,7 @@ depending on whether or not a `Coordinator` was passed to
- - -
-### tf.train.add_queue_runner(qr, collection='queue_runners') <a class="md-anchor" id="add_queue_runner"></a>
+### `tf.train.add_queue_runner(qr, collection='queue_runners')` <a class="md-anchor" id="add_queue_runner"></a>
Adds a `QueueRunner` to a collection in the graph.
@@ -1374,7 +1374,7 @@ all the collected queue runners.
- - -
-### tf.train.start_queue_runners(sess=None, coord=None, daemon=True, start=True, collection='queue_runners') <a class="md-anchor" id="start_queue_runners"></a>
+### `tf.train.start_queue_runners(sess=None, coord=None, daemon=True, start=True, collection='queue_runners')` <a class="md-anchor" id="start_queue_runners"></a>
Starts all queue runners collected in the graph.
@@ -1418,7 +1418,7 @@ details.
- - -
-### tf.scalar_summary(tags, values, collections=None, name=None) <a class="md-anchor" id="scalar_summary"></a>
+### `tf.scalar_summary(tags, values, collections=None, name=None)` <a class="md-anchor" id="scalar_summary"></a>
Outputs a `Summary` protocol buffer with scalar values.
@@ -1442,7 +1442,7 @@ summary has a summary value for each tag-value pair in `tags` and `values`.
- - -
-### tf.image_summary(tag, tensor, max_images=None, collections=None, name=None) <a class="md-anchor" id="image_summary"></a>
+### `tf.image_summary(tag, tensor, max_images=None, collections=None, name=None)` <a class="md-anchor" id="image_summary"></a>
Outputs a `Summary` protocol buffer with images.
@@ -1492,7 +1492,7 @@ build the `tag` of the summary values:
- - -
-### tf.histogram_summary(tag, values, collections=None, name=None) <a class="md-anchor" id="histogram_summary"></a>
+### `tf.histogram_summary(tag, values, collections=None, name=None)` <a class="md-anchor" id="histogram_summary"></a>
Outputs a `Summary` protocol buffer with a histogram.
@@ -1520,7 +1520,7 @@ This op reports an `OutOfRange` error if any value is not finite.
- - -
-### tf.nn.zero_fraction(value, name=None) <a class="md-anchor" id="zero_fraction"></a>
+### `tf.nn.zero_fraction(value, name=None)` <a class="md-anchor" id="zero_fraction"></a>
Returns the fraction of zeros in `value`.
@@ -1545,7 +1545,7 @@ This is useful in summaries to measure and report sparsity. For example,
- - -
-### tf.merge_summary(inputs, collections=None, name=None) <a class="md-anchor" id="merge_summary"></a>
+### `tf.merge_summary(inputs, collections=None, name=None)` <a class="md-anchor" id="merge_summary"></a>
Merges summaries.
@@ -1574,7 +1574,7 @@ in the summaries to merge use the same tag.
- - -
-### tf.merge_all_summaries(key='summaries') <a class="md-anchor" id="merge_all_summaries"></a>
+### `tf.merge_all_summaries(key='summaries')` <a class="md-anchor" id="merge_all_summaries"></a>
Merges all summaries collected in the default graph.
@@ -1612,7 +1612,7 @@ training.
- - -
-#### tf.train.SummaryWriter.__init__(logdir, graph_def=None, max_queue=10, flush_secs=120) <a class="md-anchor" id="SummaryWriter.__init__"></a>
+#### `tf.train.SummaryWriter.__init__(logdir, graph_def=None, max_queue=10, flush_secs=120)` <a class="md-anchor" id="SummaryWriter.__init__"></a>
Creates a `SummaryWriter` and an event file.
@@ -1657,7 +1657,7 @@ the event file:
- - -
-#### tf.train.SummaryWriter.add_summary(summary, global_step=None) <a class="md-anchor" id="SummaryWriter.add_summary"></a>
+#### `tf.train.SummaryWriter.add_summary(summary, global_step=None)` <a class="md-anchor" id="SummaryWriter.add_summary"></a>
Adds a `Summary` protocol buffer to the event file.
@@ -1679,7 +1679,7 @@ files.
- - -
-#### tf.train.SummaryWriter.add_event(event) <a class="md-anchor" id="SummaryWriter.add_event"></a>
+#### `tf.train.SummaryWriter.add_event(event)` <a class="md-anchor" id="SummaryWriter.add_event"></a>
Adds an event to the event file.
@@ -1691,7 +1691,7 @@ Adds an event to the event file.
- - -
-#### tf.train.SummaryWriter.add_graph(graph_def, global_step=None) <a class="md-anchor" id="SummaryWriter.add_graph"></a>
+#### `tf.train.SummaryWriter.add_graph(graph_def, global_step=None)` <a class="md-anchor" id="SummaryWriter.add_graph"></a>
Adds a `GraphDef` protocol buffer to the event file.
@@ -1709,7 +1709,7 @@ TensorBoard. Most users pass a graph in the constructor instead.
- - -
-#### tf.train.SummaryWriter.flush() <a class="md-anchor" id="SummaryWriter.flush"></a>
+#### `tf.train.SummaryWriter.flush()` <a class="md-anchor" id="SummaryWriter.flush"></a>
Flushes the event file to disk.
@@ -1719,7 +1719,7 @@ disk.
- - -
-#### tf.train.SummaryWriter.close() <a class="md-anchor" id="SummaryWriter.close"></a>
+#### `tf.train.SummaryWriter.close()` <a class="md-anchor" id="SummaryWriter.close"></a>
Flushes the event file to disk and close the file.
@@ -1729,7 +1729,7 @@ Call this method when you do not need the summary writer anymore.
- - -
-### tf.train.summary_iterator(path) <a class="md-anchor" id="summary_iterator"></a>
+### `tf.train.summary_iterator(path)` <a class="md-anchor" id="summary_iterator"></a>
An iterator for reading `Event` protocol buffers from an event file.
@@ -1777,7 +1777,7 @@ for more information about their attributes.
- - -
-### tf.train.global_step(sess, global_step_tensor) <a class="md-anchor" id="global_step"></a>
+### `tf.train.global_step(sess, global_step_tensor)` <a class="md-anchor" id="global_step"></a>
Small helper to get the global step.
@@ -1807,7 +1807,7 @@ global_step: 10
- - -
-### tf.train.write_graph(graph_def, logdir, name, as_text=True) <a class="md-anchor" id="write_graph"></a>
+### `tf.train.write_graph(graph_def, logdir, name, as_text=True)` <a class="md-anchor" id="write_graph"></a>
Writes a graph proto on disk.
diff --git a/tensorflow/g3doc/get_started/basic_usage.md b/tensorflow/g3doc/get_started/basic_usage.md
index a41ec36d56..7616c3f7ea 100644
--- a/tensorflow/g3doc/get_started/basic_usage.md
+++ b/tensorflow/g3doc/get_started/basic_usage.md
@@ -15,7 +15,7 @@ graphs. Nodes in the graph are called *ops* (short for operations). An op
takes zero or more `Tensors`, performs some computation, and produces zero or
more `Tensors`. A `Tensor` is a typed multi-dimensional array. For example,
you can represent a mini-batch of images as a 4-D array of floating point
-numbers with dimensions `[batch, height, width, channels]`).
+numbers with dimensions `[batch, height, width, channels]`.
A TensorFlow graph is a *description* of computations. To compute anything,
a graph must be launched in a `Session`. A `Session` places the graph ops onto
diff --git a/tensorflow/g3doc/get_started/index.md b/tensorflow/g3doc/get_started/index.md
index f0222e818d..9476408a54 100644
--- a/tensorflow/g3doc/get_started/index.md
+++ b/tensorflow/g3doc/get_started/index.md
@@ -53,11 +53,11 @@ of MNIST, definitely take the blue pill. If you're somewhere in between, we
suggest skimming blue, then red.
<div style="width:100%; margin:auto; margin-bottom:10px; margin-top:20px; display: flex; flex-direction: row">
- <a href="../tutorials/mnist/beginners/index.md">
- <img style="flex-grow:1; flex-shrink:1; border: 1px solid black;" src="blue_pill.png">
+ <a href="../tutorials/mnist/beginners/index.md" title="MNIST for ML Beginners tutorial">
+ <img style="flex-grow:1; flex-shrink:1; border: 1px solid black;" src="blue_pill.png" alt="MNIST for machine learning beginners tutorial" />
</a>
- <a href="../tutorials/mnist/pros/index.md">
- <img style="flex-grow:1; flex-shrink:1; border: 1px solid black;" src="red_pill.png">
+ <a href="../tutorials/mnist/pros/index.md" title="Deep MNIST for ML Experts tutorial">
+ <img style="flex-grow:1; flex-shrink:1; border: 1px solid black;" src="red_pill.png" alt="Deep MNIST for machine learning experts tutorial" />
</a>
</div>
<p style="font-size:10px;">Images licensed CC BY-SA 4.0; original by W. Carter</p>
diff --git a/tensorflow/g3doc/get_started/os_setup.md b/tensorflow/g3doc/get_started/os_setup.md
index f6b6bb4015..4db07c233b 100644
--- a/tensorflow/g3doc/get_started/os_setup.md
+++ b/tensorflow/g3doc/get_started/os_setup.md
@@ -4,36 +4,82 @@
### Ubuntu/Linux <a class="md-anchor" id="AUTOGENERATED-ubuntu-linux"></a>
-Make sure you have [pip](https://pypi.python.org/pypi/pip) installed:
+Make sure you have [pip](https://pypi.python.org/pypi/pip), the python headers,
+and (optionally) [virtualenv](https://pypi.python.org/pypi/virtualenv) installed:
-```sh
-$ sudo apt-get install python-pip
+```bash
+$ sudo apt-get install python-pip python-dev python-virtualenv
```
-Install TensorFlow:
+**Note**: All the virtualenv-related instructions are optional, but we recommend
+using the virtualenv on any multi-user system.
-```sh
+Set up a new virtualenv environment. Assuming you want to set it up in the
+directory `~/tensorflow`, run:
+
+```bash
+$ virtualenv --system-site-packages ~/tensorflow
+$ cd ~/tensorflow
+```
+
+Activate the virtualenv:
+
+```bash
+$ source bin/activate # If using bash
+$ source bin/activate.csh # If using csh
+(tensorflow)$ # Your prompt should change
+```
+
+Inside the virtualenv, install TensorFlow:
+
+```bash
# For CPU-only version
-$ sudo pip install https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
-# For GPU-enabled version
-$ sudo pip install https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
+# For GPU-enabled version (only install this version if you have the CUDA SDK installed)
+(tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
+
+(tensorflow)$ deactivate # Deactivate the virtualenv
+$ # Your prompt should change back
```
### Mac OS X <a class="md-anchor" id="AUTOGENERATED-mac-os-x"></a>
-Make sure you have [pip](https://pypi.python.org/pypi/pip) installed:
+Make sure you have [pip](https://pypi.python.org/pypi/pip) and
+(optionally) [virtualenv](https://pypi.python.org/pypi/virtualenv) installed:
+
+**Note**: All the virtualenv-related instructions are optional, but we recommend
+using the virtualenv on any multi-user system.
If using `easy_install`:
-```sh
-$ sudo easy_install pip
+```bash
+$ sudo easy_install pip # If pip is not already installed
+$ sudo pip install --upgrade virtualenv
+```
+
+Set up a new virtualenv environment. Assuming you want to set it up in the
+directory `~/tensorflow`, run:
+
+```bash
+$ virtualenv --system-site-packages ~/tensorflow
+$ cd ~/tensorflow
+```
+
+Activate the virtualenv:
+
+```bash
+$ source bin/activate # If using bash
+$ source bin/activate.csh # If using csh
+(tensorflow)$ # Your prompt should change
```
Install TensorFlow (only CPU binary version is currently available).
-```sh
-$ sudo pip install https://storage.googleapis.com/tensorflow/mac/tensorflow-0.5.0-py2-none-any.whl
+```bash
+(tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/mac/tensorflow-0.5.0-py2-none-any.whl
+(tensorflow)$ deactivate # Deactivate the virtualenv
+$ # Your prompt should change back
```
## Docker-based installation <a class="md-anchor" id="AUTOGENERATED-docker-based-installation"></a>
@@ -44,7 +90,7 @@ you avoid worrying about setting up dependencies.
First, [install Docker](http://docs.docker.com/engine/installation/). Once
Docker is up and running, you can start a container with one command:
-```sh
+```bash
$ docker run -it b.gcr.io/tensorflow/tensorflow
```
@@ -64,8 +110,28 @@ which you can use in the `docker run` command above:
## Try your first TensorFlow program <a class="md-anchor" id="AUTOGENERATED-try-your-first-tensorflow-program"></a>
-```sh
-$ python
+### (Optional) Enable GPU Support <a class="md-anchor" id="AUTOGENERATED--optional--enable-gpu-support"></a>
+
+If you installed the GPU-enabled TensorFlow pip binary, you must have the
+correct versions of the CUDA SDK and CUDNN installed on your
+system. Please see [the CUDA installation instructions](#install_cuda).
+
+You also need to set the `LD_LIBRARY_PATH` and `CUDA_HOME` environment
+variables. Consider adding the commands below to your `~/.bash_profile`. These
+assume your CUDA installation is in `/usr/local/cuda`:
+
+```bash
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64"
+export CUDA_HOME=/usr/local/cuda
+```
+
+### Run TensorFlow <a class="md-anchor" id="AUTOGENERATED-run-tensorflow"></a>
+
+First, activate the TensorFlow virtualenv, then open a python terminal:
+
+```bash
+$ source ~/tensorflow/bin/activate # Assuming the tensorflow virtualenv is ~/tensorflow
+(tensorflow)$ python
>>> import tensorflow as tf
>>> hello = tf.constant('Hello, TensorFlow!')
@@ -80,20 +146,12 @@ Hello, TensorFlow!
```
-If you are running the GPU version and you see
-```sh
-ImportError: libcudart.so.7.0: cannot open shared object file: No such file or directory
-```
-
-you most likely need to set your `LD_LIBRARY_PATH` to point to the location of
-your CUDA libraries.
-
## Installing from sources <a class="md-anchor" id="source"></a>
### Clone the TensorFlow repository <a class="md-anchor" id="AUTOGENERATED-clone-the-tensorflow-repository"></a>
-```sh
-$ git clone --recurse-submodules https://tensorflow.googlesource.com/tensorflow
+```bash
+$ git clone --recurse-submodules https://github.com/tensorflow/tensorflow
```
`--recurse-submodules` is required to fetch the protobuf library that TensorFlow
@@ -108,7 +166,7 @@ Follow instructions [here](http://bazel.io/docs/install.html) to install the
dependencies for Bazel. Then download and build the Bazel source with the
following commands:
-```sh
+```bash
$ git clone https://github.com/bazelbuild/bazel.git
$ cd bazel
$ git checkout tags/0.1.0
@@ -122,14 +180,14 @@ Add the executable `output/bazel` to your `$PATH` environment variable.
#### Install other dependencies <a class="md-anchor" id="AUTOGENERATED-install-other-dependencies"></a>
-```sh
+```bash
$ sudo apt-get install python-numpy swig python-dev
```
-#### Optional: Install CUDA (GPUs on Linux) <a class="md-anchor" id="AUTOGENERATED-optional--install-cuda--gpus-on-linux-"></a>
+#### <a name="install_cuda"></a>Optional: Install CUDA (GPUs on Linux) <a class="md-anchor" id="AUTOGENERATED--a-name--install_cuda----a-optional--install-cuda--gpus-on-linux-"></a>
-In order to build TensorFlow with GPU support, both Cuda Toolkit 7.0 and CUDNN
-6.5 V2 from NVIDIA need to be installed.
+In order to build or run TensorFlow with GPU support, both Cuda Toolkit 7.0 and
+CUDNN 6.5 V2 from NVIDIA need to be installed.
TensorFlow GPU support requires having a GPU card with NVidia Compute Capability >= 3.5. Supported cards include but are not limited to:
@@ -185,7 +243,7 @@ you invoke the bazel build command.
##### Build your target with GPU support. <a class="md-anchor" id="AUTOGENERATED-build-your-target-with-gpu-support."></a>
From the root of your source tree, run:
-```sh
+```bash
$ bazel build -c opt --config=cuda //tensorflow/cc:tutorials_example_trainer
$ bazel-bin/tensorflow/cc/tutorials_example_trainer --use_gpu
@@ -234,7 +292,7 @@ Follow installation instructions [here](http://docs.scipy.org/doc/numpy/user/ins
### Create the pip package and install <a class="md-anchor" id="create-pip"></a>
-```sh
+```bash
$ bazel build -c opt //tensorflow/tools/pip_package:build_pip_package
$ bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
@@ -247,7 +305,7 @@ $ pip install /tmp/tensorflow_pkg/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
From the root of your source tree, run:
-```sh
+```bash
$ python tensorflow/models/image/mnist/convolutional.py
Succesfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Succesfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
diff --git a/tensorflow/g3doc/how_tos/adding_an_op/index.md b/tensorflow/g3doc/how_tos/adding_an_op/index.md
index 403629b602..fcc00753ec 100644
--- a/tensorflow/g3doc/how_tos/adding_an_op/index.md
+++ b/tensorflow/g3doc/how_tos/adding_an_op/index.md
@@ -22,8 +22,7 @@ to:
* Optionally, write a function to compute gradients for the Op.
* Optionally, write a function that describes the input and output shapes
for the Op. This allows shape inference to work with your Op.
-* Test the Op, typically in Python. If you define gradients, verify them with
- the Python [`GradientChecker`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/python/kernel_tests/gradient_checker.py).
+* Test the Op, typically in Python.
<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
@@ -34,11 +33,11 @@ to:
* [The Python Op wrapper](#AUTOGENERATED-the-python-op-wrapper)
* [The C++ Op wrapper](#AUTOGENERATED-the-c---op-wrapper)
* [Verify it works](#AUTOGENERATED-verify-it-works)
-* [Validation](#validation)
+* [Validation](#Validation)
* [Op registration](#AUTOGENERATED-op-registration)
* [Attrs](#AUTOGENERATED-attrs)
* [Attr types](#AUTOGENERATED-attr-types)
- * [Polymorphism](#polymorphism)
+ * [Polymorphism](#Polymorphism)
* [Inputs and Outputs](#AUTOGENERATED-inputs-and-outputs)
* [Backwards compatibility](#AUTOGENERATED-backwards-compatibility)
* [GPU Support](#mult-archs)
@@ -192,7 +191,6 @@ statement
A good way to verify that you've successfully implemented your Op is to write a
test for it. Create the file
`tensorflow/python/kernel_tests/zero_out_op_test.py` with the contents:
-[TODO]:# (put tests somewhere else and make sure it works)
```python
import tensorflow as tf
@@ -211,7 +209,7 @@ Then run your test:
$ bazel test tensorflow/python:zero_out_op_test
```
-## Validation <a class="md-anchor" id="validation"></a>
+## Validation <a class="md-anchor" id="Validation"></a>
The example above assumed that the Op applied to a tensor of any shape. What
if it only applied to vectors? That means adding a check to the above OpKernel
@@ -278,8 +276,8 @@ instead of only the 0th element, you can register the Op like so:
<code class="lang-c++"><pre>
REGISTER\_OP("ZeroOut")
- <b>.Attr("preserve_index: int")</b>
- .Input("to_zero: int32")
+ <b>.Attr("preserve\_index: int")</b>
+ .Input("to\_zero: int32")
.Output("zeroed: int32");
</pre></code>
@@ -291,14 +289,19 @@ class ZeroOutOp : public OpKernel {
public:
explicit ZeroOutOp(OpKernelConstruction\* context) : OpKernel(context) {<b>
// Get the index of the value to preserve
- OP_REQUIRES_OK(context->GetAttr("preserve\_index", &preserve\_index\_));
+ OP\_REQUIRES\_OK(context,
+ context-&gt;GetAttr("preserve\_index", &preserve\_index\_));
+    // Check that preserve\_index is non-negative
+ OP\_REQUIRES(context, preserve\_index_ &gt;= 0,
+ errors::InvalidArgument("Need preserve\_index &gt;= 0, got ",
+ preserve\_index_));
</b>}
void Compute(OpKernelContext\* context) override {
// ...
}
<b>private:
int preserve\_index\_;</b>
-}
+};
</pre></code>
which can then be used in the `Compute` method:
@@ -306,18 +309,19 @@ which can then be used in the `Compute` method:
<code class="lang-c++"><pre>
void Compute(OpKernelContext\* context) override {
// ...
- // Set all the elements of the output tensor to 0
+<br/> <b>// Check that preserve_index is in range
+ OP\_REQUIRES(context, preserve\_index_ &lt; input.dimension(0),
+ errors::InvalidArgument("preserve\_index out of range"));<br/>
+ </b>// Set all the elements of the output tensor to 0
const int N = input.size();
- for (int i=0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
output\_flat(i) = 0;
- }<br>
+ }<br/>
<b>// Preserve the requested input value
output\_flat(preserve\_index\_) = input(preserve\_index\_);</b>
}
</pre></code>
-[TODO]:# (check the code in this section in and test it)
-
> To preserve [backwards compatibility](#backwards-compatibility), you should
> specify a [default value](#default-values-constraints) when adding an attr to
> an existing op:
@@ -446,7 +450,7 @@ REGISTER_OP("AttrDefaultExampleForAllTypes")
Note in particular that the values of type `type` use [the `DT_*` names
for the types](../../resources/dims_types.md#data-types).
-### Polymorphism <a class="md-anchor" id="polymorphism"></a>
+### Polymorphism <a class="md-anchor" id="Polymorphism"></a>
#### Type Polymorphism <a class="md-anchor" id="type-polymorphism"></a>
For ops that can take different types as input or produce different output
@@ -658,7 +662,7 @@ able to use a macro provided by
#include "tensorflow/core/framework/register_types.h"
REGISTER_OP("ZeroOut")
- .Attr("T: realnumbertypes")
+ .Attr("T: realnumbertype")
.Input("to_zero: T")
.Output("zeroed: T");
@@ -786,7 +790,7 @@ expressions:
* `<attr-type>`, where `<attr-type>` is the name of an [Attr](#attrs) with type
`type` or `list(type)` (with a possible type restriction). This syntax allows
- for [polymorphic ops](#polymorphism).
+ for [polymorphic ops](#Polymorphism).
```c++
REGISTER_OP("PolymorphicSingleInput")
@@ -898,7 +902,7 @@ create a new operation with a new name with the new semantics.
## GPU Support <a class="md-anchor" id="mult-archs"></a>
You can implement different OpKernels and register one for CPU and another for
-GPU, just like you can [register kernels for different types](#polymorphism).
+GPU, just like you can [register kernels for different types](#Polymorphism).
There are several examples of kernels with GPU support in
[tensorflow/core/kernels/](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/kernels/).
Notice some kernels have a CPU version in a `.cc` file, a GPU version in a file
@@ -925,7 +929,80 @@ kept on the CPU, add a `HostMemory()` call to the kernel registration, e.g.:
## Implement the gradient in Python <a class="md-anchor" id="AUTOGENERATED-implement-the-gradient-in-python"></a>
-[TODO]:# (Write this!)
+Given a graph of ops, TensorFlow uses automatic differentiation
+(backpropagation) to add new ops representing gradients with respect to the
+existing ops (see
+[Gradient Computation](../../api_docs/python/train.md#gradient-computation)).
+To make automatic differentiation work for new ops, you must register a gradient
+function which computes gradients with respect to the ops' inputs given
+gradients with respect to the ops' outputs.
+
+Mathematically, if an op computes \\(y = f(x)\\) the registered gradient op
+converts gradients \\(\partial / \partial y\\) with respect to \\(y\\) into
+gradients \\(\partial / \partial x\\) with respect to \\(x\\) via the chain
+rule:
+
+$$\frac{\partial}{\partial x}
+ = \frac{\partial}{\partial y} \frac{\partial y}{\partial x}
+ = \frac{\partial}{\partial y} \frac{\partial f}{\partial x}.$$
+
+In the case of `ZeroOut`, only one entry in the input affects the output, so the
+gradient with respect to the input is a sparse "one hot" tensor. This is
+expressed as follows:
+
+```python
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import sparse_ops
+
+@ops.RegisterGradient("ZeroOut")
+def _zero_out_grad(op, grad):
+ """The gradients for `zero_out`.
+
+ Args:
+ op: The `zero_out` `Operation` that we are differentiating, which we can use
+ to find the inputs and outputs of the original op.
+ grad: Gradient with respect to the output of the `zero_out` op.
+
+ Returns:
+ Gradients with respect to the input of `zero_out`.
+ """
+ to_zero = op.inputs[0]
+ shape = array_ops.shape(to_zero)
+ index = array_ops.zeros_like(shape)
+ first_grad = array_ops.reshape(grad, [-1])[0]
+ to_zero_grad = sparse_ops.sparse_to_dense(index, shape, first_grad, 0)
+ return [to_zero_grad] # List of one Tensor, since we have one input
+```
+
+Details about registering gradient functions with
+[`ops.RegisterGradient`](../../api_docs/python/framework.md#RegisterGradient):
+
+* For an op with one output, the gradient function will take an
+ [`Operation`](../../api_docs/python/framework.md#Operation) `op` and a
+ [`Tensor`](../../api_docs/python/framework.md#Tensor) `grad` and build new ops
+ out of the tensors
+ [`op.inputs[i]`](../../api_docs/python/framework.md#Operation.inputs),
+ [`op.outputs[i]`](../../api_docs/python/framework.md#Operation.outputs), and `grad`. Information
+ about any attrs can be found via
+ [`op.get_attr`](../../api_docs/python/framework.md#Operation.get_attr).
+
+* If the op has multiple outputs, the gradient function will take `op` and
+ `grads`, where `grads` is a list of gradients with respect to each output.
+ The result of the gradient function must be a list of `Tensor` objects
+ representing the gradients with respect to each input.
+
+* If there is no well-defined gradient for some input, such as for integer
+ inputs used as indices, the corresponding returned gradient should be
+ `None`. For example, for an op taking a floating point tensor `x` and an
+ integer index `i`, the gradient function would `return [x_grad, None]`.
+
+* If there is no meaningful gradient for the op at all, use
+  `ops.NoGradient("OpName")` to disable automatic differentiation, as in the sketch below.
+
+Note that at the time the gradient function is called, only the data flow graph
+of ops is available, not the tensor data itself. Thus, all computation must be
+performed using other TensorFlow ops, which run at graph execution time.
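
As a rough sketch of the last two conventions above (the op names here are hypothetical and are not ops defined in this document):

```python
from tensorflow.python.framework import ops

# An op with no meaningful gradient at all: disable automatic differentiation.
ops.NoGradient("HypotheticalLookupOp")

@ops.RegisterGradient("HypotheticalGatherOp")
def _hypothetical_gather_grad(op, grad):
  # One gradient per input: a Tensor for the float input, and None for the
  # integer index input, which has no well-defined gradient.
  x_grad = grad  # stand-in for the real gradient computation
  return [x_grad, None]
```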
## Implement a shape function in Python <a class="md-anchor" id="AUTOGENERATED-implement-a-shape-function-in-python"></a>
@@ -956,7 +1033,7 @@ def _zero_out_shape(op):
```
A shape function can also constrain the shape of an input. For the version of
-[ZeroOut with a vector shape constraint](#validation), the shape function
+[ZeroOut with a vector shape constraint](#Validation), the shape function
would be as follows:
```python
@@ -971,7 +1048,7 @@ def _zero_out_shape(op):
return [input_shape]
```
-If your op is [polymorphic with multiple inputs](#polymorphism), use the
+If your op is [polymorphic with multiple inputs](#Polymorphism), use the
properties of the operation to determine the number of shapes to check:
```
diff --git a/tensorflow/g3doc/how_tos/adding_an_op/zero_out_2_test.py b/tensorflow/g3doc/how_tos/adding_an_op/zero_out_2_test.py
new file mode 100644
index 0000000000..ce38e435fa
--- /dev/null
+++ b/tensorflow/g3doc/how_tos/adding_an_op/zero_out_2_test.py
@@ -0,0 +1,28 @@
+"""Test for version 2 of the zero_out op."""
+
+import tensorflow.python.platform
+
+import tensorflow as tf
+from tensorflow.g3doc.how_tos.adding_an_op import gen_zero_out_op_2
+from tensorflow.g3doc.how_tos.adding_an_op import zero_out_grad_2
+from tensorflow.python.kernel_tests import gradient_checker
+
+
+class ZeroOut2Test(tf.test.TestCase):
+
+ def test(self):
+ with self.test_session():
+ result = gen_zero_out_op_2.zero_out([5, 4, 3, 2, 1])
+ self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
+
+ def test_grad(self):
+ with self.test_session():
+ shape = (5,)
+ x = tf.constant([5, 4, 3, 2, 1], dtype=tf.float32)
+ y = gen_zero_out_op_2.zero_out(x)
+ err = gradient_checker.ComputeGradientError(x, shape, y, shape)
+ self.assertLess(err, 1e-4)
+
+
+if __name__ == '__main__':
+ tf.test.main()
diff --git a/tensorflow/g3doc/how_tos/adding_an_op/zero_out_3_test.py b/tensorflow/g3doc/how_tos/adding_an_op/zero_out_3_test.py
new file mode 100644
index 0000000000..eaf45d1ec4
--- /dev/null
+++ b/tensorflow/g3doc/how_tos/adding_an_op/zero_out_3_test.py
@@ -0,0 +1,35 @@
+"""Test for version 3 of the zero_out op."""
+
+import tensorflow.python.platform
+
+import tensorflow as tf
+from tensorflow.g3doc.how_tos.adding_an_op import gen_zero_out_op_3
+
+
+class ZeroOut3Test(tf.test.TestCase):
+
+ def test(self):
+ with self.test_session():
+ result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1])
+ self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
+
+ def testAttr(self):
+ with self.test_session():
+ result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=3)
+ self.assertAllEqual(result.eval(), [0, 0, 0, 2, 0])
+
+ def testNegative(self):
+ with self.test_session():
+ result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=-1)
+ with self.assertRaisesOpError("Need preserve_index >= 0, got -1"):
+ result.eval()
+
+ def testLarge(self):
+ with self.test_session():
+ result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=17)
+ with self.assertRaisesOpError("preserve_index out of range"):
+ result.eval()
+
+
+if __name__ == '__main__':
+ tf.test.main()
diff --git a/tensorflow/g3doc/how_tos/adding_an_op/zero_out_grad_2.py b/tensorflow/g3doc/how_tos/adding_an_op/zero_out_grad_2.py
new file mode 100644
index 0000000000..61fb92db27
--- /dev/null
+++ b/tensorflow/g3doc/how_tos/adding_an_op/zero_out_grad_2.py
@@ -0,0 +1,25 @@
+"""The gradient of the tutorial zero_out op."""
+
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import sparse_ops
+
+
+@ops.RegisterGradient("ZeroOut")
+def _zero_out_grad(op, grad):
+ """The gradients for `zero_out`.
+
+ Args:
+ op: The `zero_out` `Operation` that we are differentiating, which we can use
+ to find the inputs and outputs of the original op.
+ grad: Gradient with respect to the output of the `zero_out` op.
+
+ Returns:
+ Gradients with respect to the input of `zero_out`.
+ """
+ to_zero = op.inputs[0]
+ shape = array_ops.shape(to_zero)
+ index = array_ops.zeros_like(shape)
+ first_grad = array_ops.reshape(grad, [-1])[0]
+ to_zero_grad = sparse_ops.sparse_to_dense(index, shape, first_grad, 0)
+ return [to_zero_grad] # List of one Tensor, since we have one input
diff --git a/tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_kernel_1.cc b/tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_kernel_1.cc
index e960adc047..8a896061d5 100644
--- a/tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_kernel_1.cc
+++ b/tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_kernel_1.cc
@@ -11,7 +11,6 @@ Zeros out all but the first value of a Tensor.
zeroed: A Tensor whose first value is identical to `to_zero`, and 0
otherwise.
-
)doc");
class ZeroOutOp : public OpKernel {
diff --git a/tensorflow/g3doc/how_tos/adding_an_op/register_kernels.cc b/tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_kernel_2.cc
index 3d2f50d16e..0678af23ee 100644
--- a/tensorflow/g3doc/how_tos/adding_an_op/register_kernels.cc
+++ b/tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_kernel_2.cc
@@ -3,6 +3,39 @@
using namespace tensorflow;
+REGISTER_OP("ZeroOut")
+ .Attr("T: realnumbertype")
+ .Input("to_zero: T")
+ .Output("zeroed: T")
+ .Doc(R"doc(
+Zeros out all but the first value of a Tensor.
+
+zeroed: A Tensor whose first value is identical to `to_zero`, and 0
+ otherwise.
+)doc");
+
+REGISTER_OP("ZeroOut2")
+ .Attr("T: realnumbertype")
+ .Input("to_zero: T")
+ .Output("zeroed: T")
+ .Doc(R"doc(
+Zeros out all but the first value of a Tensor.
+
+zeroed: A Tensor whose first value is identical to `to_zero`, and 0
+ otherwise.
+)doc");
+
+REGISTER_OP("ZeroOut3")
+ .Attr("T: realnumbertype")
+ .Input("to_zero: T")
+ .Output("zeroed: T")
+ .Doc(R"doc(
+Zeros out all but the first value of a Tensor.
+
+zeroed: A Tensor whose first value is identical to `to_zero`, and 0
+ otherwise.
+)doc");
+
template <typename T>
class ZeroOutOp : public OpKernel {
public:
@@ -45,7 +78,7 @@ REGISTER_KERNEL_BUILDER(Name("ZeroOut")
#define REGISTER_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
- Name("ZeroOut").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
+ Name("ZeroOut2").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
ZeroOutOp<type>)
REGISTER_KERNEL(float);
@@ -56,7 +89,7 @@ REGISTER_KERNEL(int32);
#define REGISTER_KERNEL(type) \
REGISTER_KERNEL_BUILDER( \
- Name("ZeroOut").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
+ Name("ZeroOut3").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
ZeroOutOp<type>)
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
diff --git a/tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_kernel_3.cc b/tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_kernel_3.cc
new file mode 100644
index 0000000000..ebe26cf3e3
--- /dev/null
+++ b/tensorflow/g3doc/how_tos/adding_an_op/zero_out_op_kernel_3.cc
@@ -0,0 +1,52 @@
+#include "tensorflow/core/framework/op.h"
+#include "tensorflow/core/framework/op_kernel.h"
+
+using namespace tensorflow;
+
+REGISTER_OP("ZeroOut")
+ .Attr("preserve_index: int = 0")
+ .Input("to_zero: int32")
+ .Output("zeroed: int32");
+
+class ZeroOutOp : public OpKernel {
+ public:
+ explicit ZeroOutOp(OpKernelConstruction* context) : OpKernel(context) {
+ // Get the index of the value to preserve
+ OP_REQUIRES_OK(context,
+ context->GetAttr("preserve_index", &preserve_index_));
+    // Check that preserve_index is non-negative
+ OP_REQUIRES(context, preserve_index_ >= 0,
+ errors::InvalidArgument("Need preserve_index >= 0, got ",
+ preserve_index_));
+ }
+
+ void Compute(OpKernelContext* context) override {
+ // Grab the input tensor
+ const Tensor& input_tensor = context->input(0);
+ auto input = input_tensor.flat<int32>();
+
+ // Check that preserve_index is in range
+ OP_REQUIRES(context, preserve_index_ < input.dimension(0),
+ errors::InvalidArgument("preserve_index out of range"));
+
+ // Create an output tensor
+ Tensor* output_tensor = NULL;
+ OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
+ &output_tensor));
+ auto output = output_tensor->template flat<int32>();
+
+ // Set all the elements of the output tensor to 0
+ const int N = input.size();
+ for (int i = 0; i < N; i++) {
+ output(i) = 0;
+ }
+
+ // Preserve the requested input value
+ output(preserve_index_) = input(preserve_index_);
+ }
+
+ private:
+ int preserve_index_;
+};
+
+REGISTER_KERNEL_BUILDER(Name("ZeroOut").Device(DEVICE_CPU), ZeroOutOp);
diff --git a/tensorflow/g3doc/how_tos/graph_viz/index.md b/tensorflow/g3doc/how_tos/graph_viz/index.md
index 81c4a9f247..ece21ed342 100644
--- a/tensorflow/g3doc/how_tos/graph_viz/index.md
+++ b/tensorflow/g3doc/how_tos/graph_viz/index.md
@@ -9,7 +9,13 @@ To see your own graph, run TensorBoard pointing it to the log directory of the j
## Name scoping and nodes <a class="md-anchor" id="AUTOGENERATED-name-scoping-and-nodes"></a>
-Typical TensorFlow graphs can have many thousands of nodes--far too many to see easily all at once, or even to lay out using standard graph tools. To simplify, variable's name can be scoped and the visualization uses this information to define a hierarchy structure on the nodes in the graph, and by default only shows the top of this hierarchy. Here is an example that defines three operations under the `hidden` name scope using [`tf.name_scope()`](https://tensorflow.org/api_docs/python/framework.html?cl=head#name_scope):
+Typical TensorFlow graphs can have many thousands of nodes--far too many to see
+easily all at once, or even to lay out using standard graph tools. To simplify,
+variable names can be scoped and the visualization uses this information to
+define a hierarchy on the nodes in the graph. By default, only the top of this
+hierarchy is shown. Here is an example that defines three operations under the
+`hidden` name scope using
+[`tf.name_scope`](../../api_docs/python/framework.md#name_scope):
```python
import tensorflow as tf
@@ -26,10 +32,10 @@ This results in the following three op names:
* *hidden*/weights
* *hidden*/biases
-The visualization will, by default, collapse all three into a node labeled `hidden`.
+By default, the visualization will collapse all three into a node labeled `hidden`.
The extra detail isn't lost. You can double-click, or click
on the orange `+` sign in the top right to expand the node, and then you'll see
-three subnodes, for `alpha`, `weights` and `biases`.
+three subnodes for `alpha`, `weights` and `biases`.
Here's a real-life example of a more complicated node in its initial and
expanded states.
@@ -72,8 +78,8 @@ between the `init` node and its dependencies would create a very cluttered
view.
To reduce clutter, the visualization separates out all high-degree nodes to an
-"auxiliary" area on the right and doesn't draw lines to represent their edges.
-Instead of lines, we draw small "node icons" to indicate the connections.
+*auxiliary* area on the right and doesn't draw lines to represent their edges.
+Instead of lines, we draw small *node icons* to indicate the connections.
Separating out the auxiliary nodes typically doesn't remove critical
information since these nodes are usually related to bookkeeping functions.
@@ -96,9 +102,9 @@ information since these nodes are usually related to bookkeeping functions.
</tr>
</table>
-One last structural simplification is "series collapsing". Sequential
+One last structural simplification is *series collapsing*. Sequential
motifs--that is, nodes whose names differ by a number at the end and have
-isomorphic structures--are collapsed into a single "stack" of nodes, as shown
+isomorphic structures--are collapsed into a single *stack* of nodes, as shown
below. For networks with long sequences, this greatly simplifies the view. As
with hierarchical nodes, double-clicking expands the series.
@@ -126,7 +132,7 @@ for constants and summary nodes. To summarize, here's a table of node symbols:
Symbol | Meaning
--- | ---
-![Name scope](./namespace_node.png "Name scope") | "High-level" node representing a name scope. Double-click to expand a high-level node.
+![Name scope](./namespace_node.png "Name scope") | *High-level* node representing a name scope. Double-click to expand a high-level node.
![Sequence of unconnected nodes](./horizontal_stack.png "Sequence of unconnected nodes") | Sequence of numbered nodes that are not connected to each other.
![Sequence of connected nodes](./vertical_stack.png "Sequence of connected nodes") | Sequence of numbered nodes that are connected to each other.
![Operation node](./op_node.png "Operation node") | An individual operation node.
@@ -169,7 +175,7 @@ right corner of the visualization.
</table>
Selection can also be helpful in understanding high-degree nodes. Select any
-high-degree node, and the corresponding "node icons" for its other connections
+high-degree node, and the corresponding node icons for its other connections
will be selected as well. This makes it easy, for example, to see which nodes
are being saved--and which aren't.
@@ -177,7 +183,7 @@ Clicking on a node name in the info card will select it. If necessary, the
viewpoint will automatically pan so that the node is visible.
Finally, you can choose two color schemes for your graph, using the color menu
-above the legend. The default "Structure View" shows structure: when two
+above the legend. The default *Structure View* shows structure: when two
high-level nodes have the same structure, they appear in the same color of the
rainbow. Uniquely structured nodes are gray. There's a second view, which shows
what device the different operations run on. Name scopes are colored
diff --git a/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md b/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md
index cf06cf70fc..26c3856993 100644
--- a/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md
+++ b/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md
@@ -4,12 +4,12 @@ The computations you'll use TensorBoard for - like training a massive
deep neural network - can be complex and confusing. To make it easier to
understand, debug, and optimize TensorFlow programs, we've included a suite of
visualization tools called TensorBoard. You can use TensorBoard to visualize
-your TensorFlow graph, quantitative metrics about the execution of your graph,
-and even additional data like images that pass through it. When TensorBoard is
-fully configured, it looks like this:
+your TensorFlow graph, plot quantitative metrics about the execution of your
+graph, and show additional data like images that pass through it. When
+TensorBoard is fully configured, it looks like this:
![MNIST TensorBoard](./mnist_tensorboard.png "MNIST TensorBoard") If you're on
-desktop Chrome or FF, try playing around with [this live
+desktop Chrome or Firefox, try playing around with [this live
TensorBoard](/tensorboard/cifar.html).
@@ -25,12 +25,12 @@ data from, and decide which nodes you would like to annotate with
[summary operations]
(../../api_docs/python/train.md#summary-operations).
-For example, suppose that you are creating a convolutional neural network for
-training MNIST digits recognition. You'd like to record how the learning rate
+For example, suppose you are training a convolutional neural network for
+recognizing MNIST digits. You'd like to record how the learning rate
varies over time, and how the objective function is changing. Collect these by
attaching [`scalar_summary`](../../api_docs/python/train.md#scalar_summary) ops
to the nodes that output the learning rate and loss respectively. Then, give
-each `scalar_summary` a meaningful `tag`, like `'learning rate'` and `'loss
+each `scalar_summary` a meaningful `tag`, like `'learning rate'` or `'loss
function'`.
Perhaps you'd also like to visualize the distributions of activations coming
@@ -85,15 +85,18 @@ You're now all set to visualize this data using TensorBoard.
## Launching TensorBoard <a class="md-anchor" id="AUTOGENERATED-launching-tensorboard"></a>
To run TensorBoard, use the command
-`python tensorflow/tensorboard/tensorboard.py --logdir=path/to/logs`, where
-`logdir` points to the directory where the `SummaryWriter` serialized its data.
-If this `logdir` directory contains sub-directories which contain serialized
-data from separate runs, then TensorBoard will visualize the data from all of
-those runs. Once TensorBoard is running, navigate your web browser to
-localhost:6006 to view the TensorBoard.
-If you have pip installed TensorBoard, you can just simply type the command
-`tensorboard --logidr=/path/to/logs` in order to run it.
+ python tensorflow/tensorboard/tensorboard.py --logdir=path/to/log-directory
+
+where `logdir` points to the directory where the `SummaryWriter` serialized its
+data. If this `logdir` directory contains subdirectories which contain
+serialized data from separate runs, then TensorBoard will visualize the data
+from all of those runs. Once TensorBoard is running, navigate your web browser
+to `localhost:6006` to view the TensorBoard.
+
+If you installed TensorBoard via pip, you can use the simpler command
+
+ tensorboard --logdir=/path/to/log-directory
When looking at TensorBoard, you will see the navigation tabs in the top right
corner. Each tab represents a set of serialized data that can be visualized.
@@ -101,5 +104,5 @@ For any tab you are looking at, if the logs being looked at by TensorBoard do
not contain any data relevant to that tab, a message will be displayed
indicating how to serialize data that is applicable to that tab.
-For in depth information on how to use the "graph" tab to visualize your graph,
+For in-depth information on how to use the *graph* tab to visualize your graph,
see [TensorBoard: Visualizing your graph](../graph_viz/index.md).
diff --git a/tensorflow/g3doc/how_tos/variables/index.md b/tensorflow/g3doc/how_tos/variables/index.md
index 23fa8d71f3..65e80e8c00 100644
--- a/tensorflow/g3doc/how_tos/variables/index.md
+++ b/tensorflow/g3doc/how_tos/variables/index.md
@@ -150,7 +150,7 @@ with tf.Session() as sess:
### Restoring Variables <a class="md-anchor" id="AUTOGENERATED-restoring-variables"></a>
The same `Saver` object is used to restore variables. Note that when you
-restore variables form a file you do not have to initialize them beforehand.
+restore variables from a file you do not have to initialize them beforehand.
```python
# Create some variables.
diff --git a/tensorflow/g3doc/resources/faq.md b/tensorflow/g3doc/resources/faq.md
index 949806acee..474f3340bb 100644
--- a/tensorflow/g3doc/resources/faq.md
+++ b/tensorflow/g3doc/resources/faq.md
@@ -251,15 +251,20 @@ to encode the batch size as a Python constant, but instead to use a symbolic
## TensorBoard <a class="md-anchor" id="AUTOGENERATED-tensorboard"></a>
-See also the
-[how-to documentation on TensorBoard](../how_tos/graph_viz/index.md).
+#### How can I visualize a TensorFlow graph? <a class="md-anchor" id="AUTOGENERATED-how-can-i-visualize-a-tensorflow-graph-"></a>
+
+See the [graph visualization tutorial](../how_tos/graph_viz/index.md).
+
+#### What is the simplest way to send data to TensorBoard? <a class="md-anchor" id="AUTOGENERATED-what-is-the-simplest-way-to-send-data-to-tensorboard-"></a>
+
+Add summary ops to your TensorFlow graph, and use a
+[`SummaryWriter`](../api_docs/python/train.md#SummaryWriter) to write
+these summaries to a log directory. Then, start TensorBoard using
-#### What is the simplest way to send data to tensorboard? # TODO(danmane) <a class="md-anchor" id="AUTOGENERATED-what-is-the-simplest-way-to-send-data-to-tensorboard----todo-danmane-"></a>
+ python tensorflow/tensorboard/tensorboard.py --logdir=path/to/log-directory
-Add summary_ops to your TensorFlow graph, and use a SummaryWriter to write all
-of these summaries to a log directory. Then, startup TensorBoard using
-<SOME_COMMAND> and pass the --logdir flag so that it points to your
-log directory. For more details, see <YET_UNWRITTEN_TENSORBOARD_TUTORIAL>.
+For more details, see the [Summaries and TensorBoard tutorial]
+(../how_tos/summaries_and_tensorboard/index.md).
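
An end-to-end sketch, assuming a log directory of `/tmp/tf_logs` (both the path and the tag are illustrative):

```python
import tensorflow as tf

loss = tf.constant(0.5)
tf.scalar_summary('loss', loss)
summary_op = tf.merge_all_summaries()

with tf.Session() as sess:
  writer = tf.train.SummaryWriter('/tmp/tf_logs', sess.graph_def)
  writer.add_summary(sess.run(summary_op), global_step=0)
  writer.flush()
```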
## Extending TensorFlow <a class="md-anchor" id="AUTOGENERATED-extending-tensorflow"></a>
diff --git a/tensorflow/g3doc/resources/glossary.md b/tensorflow/g3doc/resources/glossary.md
index e344d21a0c..40683a1a6c 100644
--- a/tensorflow/g3doc/resources/glossary.md
+++ b/tensorflow/g3doc/resources/glossary.md
@@ -1,7 +1,5 @@
# Glossary <a class="md-anchor" id="AUTOGENERATED-glossary"></a>
-TODO(someone): Fix several broken links in Glossary
-
**Broadcasting operation**
An operation that uses [numpy-style broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
@@ -23,10 +21,10 @@ on a `Tensor` in a graph that has been launched in a session.
TensorFlow's mechanism for patching a tensor directly into any node in a graph
launched in a session. You apply feeds when you trigger the execution of a
graph, not when you build the graph. A feed temporarily replaces a node with a
-tensor value. You supply feed data as an argument to a run() or eval() call
+tensor value. You supply feed data as an argument to a `run()` or `eval()` call
that initiates computation. After the run the feed disappears and the original
node definition remains. You usually designate specific nodes to be "feed"
-nodes by using tf.placeholder() to create them. See
+nodes by using `tf.placeholder()` to create them. See
[Basic Usage](../get_started/basic_usage.md) for more information.
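
An illustrative sketch of the mechanism (the names are arbitrary):

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[2])
y = x * 2.0
with tf.Session() as sess:
  # The feed replaces `x` only for the duration of this run() call.
  print(sess.run(y, feed_dict={x: [1.0, 3.0]}))  # [2. 6.]
```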
**Fetch**
@@ -34,7 +32,7 @@ nodes by using tf.placeholder() to create them. See
TensorFlow's mechanism for retrieving tensors from a graph launched in a
session. You retrieve fetches when you trigger the execution of a graph, not
when you build the graph. To fetch the tensor value of a node or nodes,
-execute the graph with a run() call on the Session object and pass a list of
+execute the graph with a `run()` call on the `Session` object and pass a list of
names of nodes to retrieve. See [Basic Usage](../get_started/basic_usage.md)
for more information.
@@ -43,69 +41,62 @@ for more information.
Describes a computation as a directed acyclic
graph. Nodes in the graph represent operations that must be
performed. Edges in the graph represent either data or control
-dependencies. GraphDef is the proto used to describe a graph to the
-system (it is the API), and consists of a collection of NodeDefs (see
-below). A GraphDef may be converted to a (C++) Graph object which is
+dependencies. `GraphDef` is the proto used to describe a graph to the
+system (it is the API), and consists of a collection of `NodeDefs` (see
+below). A `GraphDef` may be converted to a (C++) `Graph` object which is
easier to operate on.
**IndexedSlices**
In the Python API, TensorFlow's representation of a tensor that is sparse
-along only its first dimension. If the tensor is k-dimensional, an
-IndexedSlices instance logically represents a collection of (k-1)-dimensional
-slices along the tensor's first dimension. The indices of the slices are
-stored concatenated into a single 1-dimensional vector, and the corresponding
-slices are concatenated to form a single k-dimensional tensor. Use
-SparseTensor if the sparsity is not restricted to the first dimension.
+along only its first dimension. If the tensor is `k`-dimensional, an
+`IndexedSlices` instance logically represents a collection of
+`(k-1)`-dimensional slices along the tensor's first dimension. The indices of
+the slices are stored concatenated into a single 1-dimensional vector, and the
+corresponding slices are concatenated to form a single `k`-dimensional tensor. Use
+`SparseTensor` if the sparsity is not restricted to the first dimension.
**Node**
An element of a graph.
-Describes how to invoke a specific Op as one node in a specific computation
-Graph, including the values for any attrs needed to configure the Op. For Ops
-that are polymorphic, the attrs include sufficient information to completely
-determine the signature of the Node. See graph.proto for details.
+Describes how to invoke a specific operation as one node in a specific
+computation `Graph`, including the values for any `attrs` needed to configure
+the operation. For operations that are polymorphic, the `attrs` include
+sufficient information to completely determine the signature of the `Node`.
+See `graph.proto` for details.
**Op (operation)**
-In the TensorFlow runtime: A type of computation such as 'add' or 'matmul' or
-'concat'. You can add new ops to the runtime as described [how to add an
+In the TensorFlow runtime: A type of computation such as `add` or `matmul` or
+`concat`. You can add new ops to the runtime as described [how to add an
op](../how_tos/adding_an_op/index.md).
In the Python API: A node in the graph. Ops are represented by instances of
-the class [tf.Operation](../api_docs/python/framework.md#Operation). The
+the class [`tf.Operation`](../api_docs/python/framework.md#Operation). The
`type` property of an `Operation` indicates the run operation for the node,
-such as 'add' or 'matmul'.
-
-**Quantization**
-
-A reduction of numerical precision. Quantization maps floating-point values
-onto a smaller set of values, and is particular useful for improving the
-efficiency of neural networks. See TensorFlow's [neural network
-operations](../api_docs/python/nn.md?cl=head#quantized_avg_pool) for more
-information about TensorFlow's quantization support.
+such as `add` or `matmul`.
**Run**
-The action of executing ops in a launched graph. Requires that the graph be launched
-in a Session.
+The action of executing ops in a launched graph. Requires that the graph be
+launched in a `Session`.
-In the Python API: A method of the Session class:
-[tf.Session.run](../api_docs/python/client.md#Session). You can pass tensors
+In the Python API: A method of the `Session` class:
+[`tf.Session.run`](../api_docs/python/client.md#Session). You can pass tensors
to feed and fetch to the `run()` call.
-In the C++ API: A method of the [tensorflow::Session](../api_docs/cc/ClassSession.md).
+In the C++ API: A method of the [`tensorflow::Session`](../api_docs/cc/ClassSession.md).
**Session**
A runtime object representing a launched graph. Provides methods to execute
ops in the graph.
-In the Python API: [tf.Session](../api_docs/python/client.md#Session)
+In the Python API: [`tf.Session`](../api_docs/python/client.md#Session)
In the C++ API: class used to launch a graph and run operations
-[tensorflow::Session](../api_docs/cc/ClassSession.md).
+[`tensorflow::Session`](../api_docs/cc/ClassSession.md).
**Shape**
@@ -115,23 +106,23 @@ In a launched graph: Property of the tensors that flow between nodes. Some ops
have strong requirements on the shape of their inputs and report errors at
runtime if these are not met.
-In the Python API: Attribute of a Python Tensor in the graph construction
+In the Python API: Attribute of a Python `Tensor` in the graph construction
API. During construction the shape of tensors can be only partially known, or
even unknown. See
-[tf.TensorShape](../api_docs/python/framework.md#TensorShape)
+[`tf.TensorShape`](../api_docs/python/framework.md#TensorShape)
In the C++ API: class used to represent the shape of tensors
-[tensorflow::TensorShape](../api_docs/cc/ClassTensorShape.md).
+[`tensorflow::TensorShape`](../api_docs/cc/ClassTensorShape.md).
**SparseTensor**
In the Python API, TensorFlow's representation of a tensor that is sparse in
-arbitrary positions. A SparseTensor stores only the non-empty values along
+arbitrary positions. A `SparseTensor` stores only the non-empty values along
with their indices, using a dictionary-of-keys format. In other words, if
-there are m non-empty values, it maintains a length-m vector of values and
-a matrix with m rows of indices. For efficiency, SparseTensor requires the
+there are `m` non-empty values, it maintains a length-`m` vector of values and
+a matrix with `m` rows of indices. For efficiency, `SparseTensor` requires the
indices to be sorted along increasing dimension number, i.e. in row-major
-order. Use IndexedSlices if the sparsity is only along the first dimension.
+order. Use `IndexedSlices` if the sparsity is only along the first dimension.
**Tensor**
@@ -141,11 +132,11 @@ dimensions `[batch, height, width, channel]`.
In a launched graph: Type of the data that flow between nodes.
-In the Python API: class used to represent the output and inputs of Ops added
-to the graph [tf.Tensor](../api_docs/python/framework.md#Tensor). Instances of
+In the Python API: class used to represent the output and inputs of ops added
+to the graph [`tf.Tensor`](../api_docs/python/framework.md#Tensor). Instances of
this class do not hold data.
In the C++ API: class used to represent tensors returned from a
-[Session::Run()](../api_docs/cc/ClassSession.md) call
-[tensorflow::Tensor](../api_docs/cc/ClassTensor.md).
+[`Session::Run()`](../api_docs/cc/ClassSession.md) call
+[`tensorflow::Tensor`](../api_docs/cc/ClassTensor.md).
Instances of this class hold data.
diff --git a/tensorflow/g3doc/resources/uses.md b/tensorflow/g3doc/resources/uses.md
index fa67a58163..08417f0ab0 100644
--- a/tensorflow/g3doc/resources/uses.md
+++ b/tensorflow/g3doc/resources/uses.md
@@ -35,4 +35,3 @@ Listed below are some of the many uses of TensorFlow.
* **Organization**: Google
* **Description**: On-device computer vision model to do optical character recognition to enable real-time translation.
* **More info**: [Google Research blog post](http://googleresearch.blogspot.com/2015/07/how-google-translate-squeezes-deep.html)
-}
diff --git a/tensorflow/g3doc/tutorials/deep_cnn/index.md b/tensorflow/g3doc/tutorials/deep_cnn/index.md
index be23e7ccaa..40d289eeef 100644
--- a/tensorflow/g3doc/tutorials/deep_cnn/index.md
+++ b/tensorflow/g3doc/tutorials/deep_cnn/index.md
@@ -1,6 +1,6 @@
# Convolutional Neural Networks <a class="md-anchor" id="AUTOGENERATED-convolutional-neural-networks"></a>
-**NOTE:** This tutorial is intended for *advanced* users of TensorFlow
+> **NOTE:** This tutorial is intended for *advanced* users of TensorFlow
and assumes expertise and experience in machine learning.
## Overview <a class="md-anchor" id="AUTOGENERATED-overview"></a>
@@ -18,28 +18,28 @@ by Alex Krizhevsky.
### Goals <a class="md-anchor" id="AUTOGENERATED-goals"></a>
The goal of this tutorial is to build a relatively small convolutional neural
-network (CNN) for recognizing images. In the process this tutorial:
+network (CNN) for recognizing images. In the process, this tutorial:
1. Highlights a canonical organization for network architecture,
training and evaluation.
2. Provides a template for constructing larger and more sophisticated models.
-The reason CIFAR-10 was selected was because it contains enough complexity to
-exercise much of TensorFlow's ability to scale to large models. At the same
-time, the model is small enough to train fast in order to test new ideas and
-experiments.
+CIFAR-10 was selected because it is complex enough to exercise much of
+TensorFlow's ability to scale to large models. At the same time, the model is
+small enough to train quickly, which makes it ideal for trying out new ideas
+and experimenting with new techniques.
### Highlights of the Tutorial <a class="md-anchor" id="AUTOGENERATED-highlights-of-the-tutorial"></a>
The CIFAR-10 tutorial demonstrates several important constructs for
designing larger and more sophisticated models in TensorFlow:
-* Core mathematical components including[convolution](
+* Core mathematical components including [convolution](
../../api_docs/python/nn.md#conv2d), [rectified linear activations](
../../api_docs/python/nn.md#relu), [max pooling](
../../api_docs/python/nn.md#max_pool) and [local response normalization](
../../api_docs/python/nn.md#local_response_normalization).
* [Visualization](../../how_tos/summaries_and_tensorboard/index.md)
-of network activity during training including input images,
+of network activities during training, including input images,
losses and distributions of activations and gradients.
* Routines for calculating the
[moving average](../../api_docs/python/train.md#ExponentialMovingAverage)
@@ -55,7 +55,7 @@ data to isolate the model from disk latency and expensive image pre-processing.
We also provide a multi-GPU version of the model which demonstrates:
* Configuring a model to train across multiple GPU cards in parallel.
-* Sharing and updating variables between multiple GPUs.
+* Sharing and updating variables among multiple GPUs.
We hope that this tutorial provides a launch point for building larger CNNs for
vision tasks on TensorFlow.
@@ -81,10 +81,10 @@ The code for this tutorial resides in
File | Purpose
--- | ---
-[`cifar10_input.py`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/image/cifar10/cifar10_input.py) | Read the native CIFAR-10 binary file format.
-[`cifar10.py`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/image/cifar10/cifar10.py) | Build the CIFAR-10 model.
-[`cifar10_train.py`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/image/cifar10/cifar10_train.py) | Train a CIFAR-10 model on a single machine.
-[`cifar10_multi_gpu_train.py`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/image/cifar10/cifar10_multi_gpu_train.py) | Train a CIFAR-10 model on multiple GPUs.
+[`cifar10_input.py`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/image/cifar10/cifar10_input.py) | Reads the native CIFAR-10 binary file format.
+[`cifar10.py`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/image/cifar10/cifar10.py) | Builds the CIFAR-10 model.
+[`cifar10_train.py`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/image/cifar10/cifar10_train.py) | Trains a CIFAR-10 model on a CPU or GPU.
+[`cifar10_multi_gpu_train.py`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/image/cifar10/cifar10_multi_gpu_train.py) | Trains a CIFAR-10 model on multiple GPUs.
[`cifar10_eval.py`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/image/cifar10/cifar10_eval.py) | Evaluates the predictive performance of a CIFAR-10 model.
@@ -130,7 +130,7 @@ artificially increase the data set size:
Please see the [`Images`](../../api_docs/python/image.md) page for the list of
available distortions. We also attach an
-[`image_summary`](../../api_docs/python/train.md?#image_summary) to the images
+[`image_summary`](../../api_docs/python/train.md#image_summary) to the images
so that we may visualize them in TensorBoard. This is a good practice to verify
that inputs are built correctly.
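A rough sketch of attaching such a summary (the tag and the all-zeros batch below are placeholders; the real code attaches the distorted input batch):

```python
import tensorflow as tf

# Attach an image summary so TensorBoard can display a few input images.
images = tf.zeros([16, 24, 24, 3])              # stand-in for the distorted batch
tf.image_summary("distorted_images", images, max_images=10)
```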
@@ -169,9 +169,9 @@ Here is a graph generated from TensorBoard describing the inference operation:
> **EXERCISE**: The outputs of `inference` are un-normalized logits. Try editing
the network architecture to return normalized predictions using [`tf.softmax()`]
-(../../api_docs/python/nn.md?cl=head#softmax).
+(../../api_docs/python/nn.md#softmax).
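A minimal sketch of what the exercise asks for, assuming `logits` stands in for the `[batch_size, NUM_CLASSES]` output of `inference()`:

```python
import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1],
                      [1.0, 3.0, 0.2]])        # stand-in for inference() output
predictions = tf.nn.softmax(logits)            # each row now sums to 1
with tf.Session() as sess:
    print(sess.run(predictions))
```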
-The `inputs()` and `inference()` functions provide all of the components
+The `inputs()` and `inference()` functions provide all the components
necessary to perform evaluation on a model. We now shift our focus towards
building operations for training a model.
@@ -196,7 +196,7 @@ For regularization, we also apply the usual
variables. The objective function for the model is the sum of the cross entropy
loss and all these weight decay terms, as returned by the `loss()` function.
-We visualize it in TensorBoard with a [scalar_summary](../../api_docs/python/train.md?#scalar_summary):
+We visualize it in TensorBoard with a [scalar_summary](../../api_docs/python/train.md#scalar_summary):
![CIFAR-10 Loss](./cifar_loss.png "CIFAR-10 Total Loss")
###### [View this TensorBoard live! (Chrome/FF)](/tensorboard/cifar.html) <a class="md-anchor" id="AUTOGENERATED--view-this-tensorboard-live---chrome-ff----tensorboard-cifar.html-"></a>
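For reference, a minimal sketch of attaching such a scalar summary (the constant here is a stand-in for the real loss tensor returned by `loss()`):

```python
import tensorflow as tf

total_loss = tf.constant(2.3, name="total_loss")    # stand-in for loss()
tf.scalar_summary(total_loss.op.name, total_loss)   # plotted over time by TensorBoard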
@@ -214,7 +214,7 @@ over time.
The `train()` function adds the operations needed to minimize the objective by
calculating the gradient and updating the learned variables (see
[`GradientDescentOptimizer`](../../api_docs/python/train.md#GradientDescentOptimizer)
-for details). It returns an operation that executes all of the calculations
+for details). It returns an operation that executes all the calculations
needed to train and update the model for one batch of images.
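The essence of such a `train()` step, shown on a toy one-parameter loss (all names below are hypothetical stand-ins for the tutorial's variables):

```python
import tensorflow as tf

w = tf.Variable(3.0)                          # a stand-in learned parameter
total_loss = tf.square(w - 1.0)               # a stand-in for loss()
global_step = tf.Variable(0, trainable=False)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
    total_loss, global_step=global_step)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for _ in range(5):
        sess.run(train_op)                    # one "batch" of training per run
    print(sess.run([w, global_step]))         # w moves toward 1.0, step reaches 5
```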
## Launching and Training the Model <a class="md-anchor" id="AUTOGENERATED-launching-and-training-the-model"></a>
@@ -254,7 +254,7 @@ images.
this loss is the sum of the cross entropy and all weight decay terms.
* Keep an eye on the processing speed of a batch. The numbers shown above were
-run on a Tesla K40c. If you are running on a CPU, expect slower performance.
+obtained on a Tesla K40c. If you are running on a CPU, expect slower performance.
> **EXERCISE:** When experimenting, it is sometimes annoying that the first
diff --git a/tensorflow/g3doc/tutorials/mandelbrot/index.md b/tensorflow/g3doc/tutorials/mandelbrot/index.md
index fa06e6b882..4c3a399407 100755
--- a/tensorflow/g3doc/tutorials/mandelbrot/index.md
+++ b/tensorflow/g3doc/tutorials/mandelbrot/index.md
@@ -1,6 +1,18 @@
# Mandelbrot Set <a class="md-anchor" id="AUTOGENERATED-mandelbrot-set"></a>
-```
+Visualizing the Mandelbrot set doesn't have anything to do with machine
+learning, but it makes for a fun example of how one can use TensorFlow for
+general mathematics. This is actually a pretty naive implementation of the
+visualization, but it makes the point. (We may end up providing a more
+elaborate implementation down the line to produce even more beautiful images.)
+
+Note: This tutorial was originally prepared as an IPython notebook.
+
+## Basic Setup <a class="md-anchor" id="AUTOGENERATED-basic-setup"></a>
+
+We'll need a few imports to get started.
+
+```python
#Import libraries for simulation
import tensorflow as tf
import numpy as np
@@ -12,8 +24,10 @@ from IPython.display import clear_output, Image, display
import scipy.ndimage as nd
```
+Now we'll define a function to actually display the image once we have
+iteration counts.
-```
+```python
def DisplayFractal(a, fmt='jpeg'):
"""Display an array of iteration counts as a
colorful picture of a fractal."""
@@ -29,36 +43,43 @@ def DisplayFractal(a, fmt='jpeg'):
display(Image(data=f.getvalue()))
```
+## Session and Variable Initialization <a class="md-anchor" id="AUTOGENERATED-session-and-variable-initialization"></a>
-```
-sess = tf.InteractiveSession()
-```
-
- Exception AssertionError: AssertionError() in <bound method InteractiveSession.__del__ of <tensorflow.python.client.session.InteractiveSession object at 0x6247390>> ignored
+For playing around like this, we often use an interactive session, but a regular
+session would work as well.
+```python
+sess = tf.InteractiveSession()
+```
+It's handy that we can freely mix NumPy and TensorFlow.
-```
+```python
# Use NumPy to create a 2D array of complex numbers
Y, X = np.mgrid[-1.3:1.3:0.005, -2:1:0.005]
Z = X+1j*Y
```
+Now we define and initialize our TensorFlow variables.
-```
+```python
xs = tf.constant(Z.astype("complex64"))
zs = tf.Variable(xs)
ns = tf.Variable(tf.zeros_like(xs, "float32"))
```
+TensorFlow requires that you explicitly initialize variables before using them.
-```
+```python
tf.initialize_all_variables().run()
```
+## Defining and Running the Computation <a class="md-anchor" id="AUTOGENERATED-defining-and-running-the-computation"></a>
-```
+Now we specify more of the computation...
+
+```python
# Compute the new values of z: z^2 + x
zs_ = zs*zs + xs
@@ -66,7 +87,7 @@ zs_ = zs*zs + xs
not_diverged = tf.complex_abs(zs_) < 4
# Operation to update the zs and the iteration count.
-#t
+#
# Note: We keep computing zs after they diverge! This
# is very wasteful! There are better, if a little
# less simple, ways to do this.
@@ -77,21 +98,20 @@ step = tf.group(
)
```
+... and run it for a couple hundred steps.
-```
+```python
for i in range(200): step.run()
```
+Let's see what we've got.
-```
+```python
DisplayFractal(ns.eval())
```
+![jpeg](mandelbrot_output.jpg)
-![jpeg](output_8_0.jpe)
-
+Not bad!
-```
-
-```
diff --git a/tensorflow/g3doc/tutorials/mandelbrot/output_8_0.jpe b/tensorflow/g3doc/tutorials/mandelbrot/output_8_0.jpe
deleted file mode 100755
index 8e261d44a8..0000000000
--- a/tensorflow/g3doc/tutorials/mandelbrot/output_8_0.jpe
+++ /dev/null
Binary files differ
diff --git a/tensorflow/g3doc/tutorials/mnist/beginners/index.md b/tensorflow/g3doc/tutorials/mnist/beginners/index.md
index eddd4f324a..34bc11aa26 100644
--- a/tensorflow/g3doc/tutorials/mnist/beginners/index.md
+++ b/tensorflow/g3doc/tutorials/mnist/beginners/index.md
@@ -88,9 +88,9 @@ The corresponding labels in MNIST are numbers between 0 and 9, describing
which digit a given image is of.
For the purposes of this tutorial, we're going to want our labels as
as "one-hot vectors". A one-hot vector is a vector which is 0 in most
-dimensions, and 1 in a single dimension. In this case, the \(n\)th digit will be
-represented as a vector which is 1 in the \(n\)th dimensions. For example, 0
-would be \([1,0,0,0,0,0,0,0,0,0,0]\).
+dimensions, and 1 in a single dimension. In this case, the \\(n\\)th digit will be
+represented as a vector which is 1 in the \\(n\\)th dimension. For example, 0
+would be \\([1,0,0,0,0,0,0,0,0,0]\\).
Consequently, `mnist.train.labels` is a
`[60000, 10]` array of floats.
@@ -131,14 +131,14 @@ weights.
We also add some extra evidence called a bias. Basically, we want to be able
to say that some things are more likely independent of the input. The result is
-that the evidence for a class \(i\) given an input \(x\) is:
+that the evidence for a class \\(i\\) given an input \\(x\\) is:
$$\text{evidence}_i = \sum_j W_{i,~ j} x_j + b_i$$
-where \(W_i\) is the weights and \(b_i\) is the bias for class \(i\), and \(j\)
-is an index for summing over the pixels in our input image \(x\). We then
+where \\(W_i\\) is the weights and \\(b_i\\) is the bias for class \\(i\\), and \\(j\\)
+is an index for summing over the pixels in our input image \\(x\\). We then
convert the evidence tallies into our predicted probabilities
-\(y\) using the "softmax" function:
+\\(y\\) using the "softmax" function:
$$y = \text{softmax}(\text{evidence})$$
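To make this concrete, here is a tiny NumPy sketch of the softmax function (independent of TensorFlow; the input numbers are arbitrary):

```python
import numpy as np

def softmax(evidence):
    # Exponentiate, then normalize so the outputs form a probability distribution.
    exps = np.exp(evidence - np.max(evidence))  # subtract the max for numerical stability
    return exps / np.sum(exps)

print(softmax(np.array([2.0, 1.0, 0.1])))  # roughly [0.66, 0.24, 0.10]
```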
@@ -168,8 +168,8 @@ on it in Michael Nielsen's book, complete with an interactive visualization.)
You can picture our softmax regression as looking something like the following,
-although with a lot more \(x\)s. For each output, we compute a weighted sum of
-the \(x\)s, add a bias, and then apply softmax.
+although with a lot more \\(x\\)s. For each output, we compute a weighted sum of
+the \\(x\\)s, add a bias, and then apply softmax.
<div style="width:55%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="img/softmax-regression-scalargraph.png">
@@ -261,7 +261,7 @@ y = tf.nn.softmax(tf.matmul(x,W) + b)
```
First, we multiply `x` by `W` with the expression `tf.matmul(x,W)`. This is
-flipped from when we multiplied them in our equation, where we had \(Wx\), as a
+flipped from when we multiplied them in our equation, where we had \\(Wx\\), as a
small trick
to deal with `x` being a 2D tensor with multiple inputs. We then add `b`, and
finally apply `tf.nn.softmax`.
@@ -288,7 +288,7 @@ from gambling to machine learning. It's defined:
$$H_{y'}(y) = -\sum_i y'_i \log(y_i)$$
-Where \(y\) is our predicted probability distribution, and \(y'\) is the true
+Where \\(y\\) is our predicted probability distribution, and \\(y'\\) is the true
distribution (the one-hot vector we'll input). In some rough sense, the
cross-entropy is measuring how inefficient our predictions are for describing
the truth. Going into more detail about cross-entropy is beyond the scope of
@@ -302,7 +302,7 @@ the correct answers:
y_ = tf.placeholder("float", [None,10])
```
-Then we can implement the cross-entropy, \(-\sum y'\log(y)\):
+Then we can implement the cross-entropy, \\(-\sum y'\log(y)\\):
```python
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
@@ -334,7 +334,7 @@ descent algorithm with a learning rate of 0.01. Gradient descent is a simple
procedure, where TensorFlow simply shifts each variable a little bit in the
direction that reduces the cost. But TensorFlow also provides
[many other optimization algorithms]
-(../../../api_docs/python/train.md?#optimizers): using one is as simple as
+(../../../api_docs/python/train.md#optimizers): using one is as simple as
tweaking one line.
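To see how little changes, here is a sketch with a toy loss standing in for `cross_entropy`; only the line that builds `train_step` would differ between optimizers:

```python
import tensorflow as tf

w = tf.Variable(0.0)
loss = tf.square(w - 1.0)                  # stand-in for cross_entropy
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
# Hypothetically, using a different built-in optimizer is the same one-liner:
# train_step = tf.train.AdagradOptimizer(0.01).minimize(loss)
```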
What TensorFlow actually does here, behind the scenes, is it adds new operations
diff --git a/tensorflow/g3doc/tutorials/mnist/download/index.md b/tensorflow/g3doc/tutorials/mnist/download/index.md
index e985a2204d..5217b7f531 100644
--- a/tensorflow/g3doc/tutorials/mnist/download/index.md
+++ b/tensorflow/g3doc/tutorials/mnist/download/index.md
@@ -11,7 +11,7 @@ This tutorial references the following files:
File | Purpose
--- | ---
-[`input_data.py`](../input_data.py) | The code to download the MNIST dataset for training and evaluation.
+[`input_data.py`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/g3doc/tutorials/mnist/input_data.py) | The code to download the MNIST dataset for training and evaluation.
## Prepare the Data <a class="md-anchor" id="AUTOGENERATED-prepare-the-data"></a>
diff --git a/tensorflow/g3doc/tutorials/mnist/pros/index.md b/tensorflow/g3doc/tutorials/mnist/pros/index.md
index 15892a957d..c6ef4ce956 100644
--- a/tensorflow/g3doc/tutorials/mnist/pros/index.md
+++ b/tensorflow/g3doc/tutorials/mnist/pros/index.md
@@ -18,8 +18,9 @@ TensorFlow session.
### Load MNIST Data <a class="md-anchor" id="AUTOGENERATED-load-mnist-data"></a>
-For your convenience, we've included [a script](../input_data.py) which
-automatically downloads and imports the MNIST dataset. It will create a
+For your convenience, we've included
+[a script](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/g3doc/tutorials/mnist/input_data.py)
+which automatically downloads and imports the MNIST dataset. It will create a
directory `'MNIST_data'` in which to store the data files.
```python
@@ -170,7 +171,7 @@ can use automatic differentiation to find the gradients of the cost with
respect to each of the variables.
TensorFlow has a variety of
[builtin optimization algorithms]
-(../../../api_docs/python/train.md?#optimizers).
+(../../../api_docs/python/train.md#optimizers).
For this example, we will use steepest gradient descent, with a step length of
0.01, to descend the cross entropy.
diff --git a/tensorflow/g3doc/tutorials/pdes/index.md b/tensorflow/g3doc/tutorials/pdes/index.md
index a7c84ebd63..6866fd6f9a 100755
--- a/tensorflow/g3doc/tutorials/pdes/index.md
+++ b/tensorflow/g3doc/tutorials/pdes/index.md
@@ -1,9 +1,17 @@
# Partial Differential Equations <a class="md-anchor" id="AUTOGENERATED-partial-differential-equations"></a>
+TensorFlow isn't just for machine learning. Here we give a (somewhat
+pedestrian) example of using TensorFlow for simulating the behavior of a
+partial differential equation. We'll simulate the surface of a square pond as a
+few raindrops land on it.
+
+Note: This tutorial was originally prepared as an IPython notebook.
+
## Basic Setup <a class="md-anchor" id="AUTOGENERATED-basic-setup"></a>
+A few imports we'll need.
-```
+```python
#Import libraries for simulation
import tensorflow as tf
import numpy as np
@@ -14,8 +22,9 @@ from cStringIO import StringIO
from IPython.display import clear_output, Image, display
```
+A function for displaying the state of the pond's surface as an image.
-```
+```python
def DisplayArray(a, fmt='jpeg', rng=[0,1]):
"""Display an array as a picture."""
a = (a - rng[0])/float(rng[1] - rng[0])*255
@@ -25,15 +34,18 @@ def DisplayArray(a, fmt='jpeg', rng=[0,1]):
display(Image(data=f.getvalue()))
```
+Here we start an interactive TensorFlow session for convenience in playing
+around. A regular session would work as well if we were doing this in an
+executable .py file.
-```
+```python
sess = tf.InteractiveSession()
```
## Computational Convenience Functions <a class="md-anchor" id="AUTOGENERATED-computational-convenience-functions"></a>
-```
+```python
def make_kernel(a):
"""Transform a 2D array into a convolution kernel"""
a = np.asarray(a)
@@ -56,13 +68,16 @@ def laplace(x):
## Define the PDE <a class="md-anchor" id="AUTOGENERATED-define-the-pde"></a>
+Our pond is a perfect 500 x 500 square, as is the case for most ponds found in
+nature.
-```
+```python
N = 500
```
+Here we create our pond and hit it with some raindrops.
-```
+```python
# Initial Conditions -- some rain drops hit a pond
# Set everything to zero
@@ -73,16 +88,17 @@ ut_init = np.zeros([N, N], dtype="float32")
for n in range(40):
a,b = np.random.randint(0, N, 2)
u_init[a,b] = np.random.uniform()
-
+
DisplayArray(u_init, rng=[-0.1, 0.1])
```
+![jpeg](pde_output_1.jpg)
-![jpeg](output_8_0.jpe)
+Now let's specify the details of the differential equation.
-```
+```python
# parameters
# eps -- time resolution
# damping -- wave damping
@@ -105,8 +121,9 @@ step = tf.group(
## Run The Simulation <a class="md-anchor" id="AUTOGENERATED-run-the-simulation"></a>
+This is where it gets fun -- running time forward with a simple for loop.
-```
+```python
# initialize state to initial conditions
tf.initialize_all_variables().run()
@@ -120,11 +137,7 @@ for i in range(1000):
DisplayArray(U.eval(), rng=[-0.1, 0.1])
```
+![jpeg](pde_output_2.jpg)
-![jpeg](output_11_0.jpe)
-
+Look! Ripples!
-
-```
-
-```
diff --git a/tensorflow/g3doc/tutorials/pdes/output_11_0.jpe b/tensorflow/g3doc/tutorials/pdes/output_11_0.jpe
deleted file mode 100755
index 8cd8cf02b5..0000000000
--- a/tensorflow/g3doc/tutorials/pdes/output_11_0.jpe
+++ /dev/null
Binary files differ
diff --git a/tensorflow/g3doc/tutorials/pdes/output_8_0.jpe b/tensorflow/g3doc/tutorials/pdes/output_8_0.jpe
deleted file mode 100755
index 97954effc0..0000000000
--- a/tensorflow/g3doc/tutorials/pdes/output_8_0.jpe
+++ /dev/null
Binary files differ
diff --git a/tensorflow/g3doc/tutorials/word2vec/index.md b/tensorflow/g3doc/tutorials/word2vec/index.md
index c9b66cab88..f3ae416d43 100644
--- a/tensorflow/g3doc/tutorials/word2vec/index.md
+++ b/tensorflow/g3doc/tutorials/word2vec/index.md
@@ -94,8 +94,8 @@ datasets. We will focus on the skip-gram model in the rest of this tutorial.
Neural probabilistic language models are traditionally trained using the
[maximum likelihood](https://en.wikipedia.org/wiki/Maximum_likelihood) (ML)
-principle to maximize the probability of the next word \(w_t\) (for 'target)
-given the previous words \(h\) (for 'history') in terms of a
+principle to maximize the probability of the next word \\(w_t\\) (for 'target')
+given the previous words \\(h\\) (for 'history') in terms of a
[*softmax* function](https://en.wikipedia.org/wiki/Softmax_function),
$$
@@ -106,8 +106,8 @@ P(w_t | h) &= \text{softmax}(\exp \{ \text{score}(w_t, h) \}) \\
\end{align}
$$
-where \(\text{score}(w_t, h)\) computes the compatibility of word \(w_t\) with
-the context \(h\) (a dot product is commonly used). We train this model by
+where \\(\text{score}(w_t, h)\\) computes the compatibility of word \\(w_t\\) with
+the context \\(h\\) (a dot product is commonly used). We train this model by
maximizing its log-likelihood on the training set, i.e. by maximizing
$$
@@ -120,8 +120,8 @@ $$
This yields a properly normalized probabilistic model for language modeling.
However this is very expensive, because we need to compute and normalize each
-probability using the score for all other \(V\) words \(w'\) in the current
-context \(h\), *at every training step*.
+probability using the score for all other \\(V\\) words \\(w'\\) in the current
+context \\(h\\), *at every training step*.
<div style="width:60%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="img/softmax-nplm.png" alt>
@@ -130,7 +130,7 @@ context \(h\), *at every training step*.
On the other hand, for feature learning in word2vec we do not need a full
probabilistic model. The CBOW and skip-gram models are instead trained using a
binary classification objective (logistic regression) to discriminate the real
-target words \(w_t\) from \(k\) imaginary (noise) words \(\tilde w\), in the
+target words \\(w_t\\) from \\(k\\) imaginary (noise) words \\(\tilde w\\), in the
same context. We illustrate this below for a CBOW model. For skip-gram the
direction is simply inverted.
@@ -144,10 +144,10 @@ $$J_\text{NEG} = \log Q_\theta(D=1 |w_t, h) +
k \mathop{\mathbb{E}}_{\tilde w \sim P_\text{noise}}
\left[ \log Q_\theta(D = 0 |\tilde w, h) \right]$$,
-where \(Q_\theta(D=1 | w, h)\) is the binary logistic regression probability
-under the model of seeing the word \(w\) in the context \(h\) in the dataset
-\(D\), calculated in terms of the learned embedding vectors \(\theta\). In
-practice we approximate the expectation by drawing \(k\) constrastive words
+where \\(Q_\theta(D=1 | w, h)\\) is the binary logistic regression probability
+under the model of seeing the word \\(w\\) in the context \\(h\\) in the dataset
+\\(D\\), calculated in terms of the learned embedding vectors \\(\theta\\). In
+practice we approximate the expectation by drawing \\(k\\) contrastive words
from the noise distribution (i.e. we compute a
[Monte Carlo average](https://en.wikipedia.org/wiki/Monte_Carlo_integration)).
@@ -159,7 +159,7 @@ and there is good mathematical motivation for using this loss function:
The updates it proposes approximate the updates of the softmax function in the
limit. But computationally it is especially appealing because computing the
loss function now scales only with the number of *noise words* that we
-select (\(k\)), and not *all words* in the vocabulary (\(V\)). This makes it
+select (\\(k\\)), and not *all words* in the vocabulary (\\(V\\)). This makes it
much faster to train. We will actually make use of the very similar
[noise-contrastive estimation (NCE)](http://papers.nips.cc/paper/5165-learning-word-embeddings-efficiently-with-noise-contrastive-estimation.pdf)
loss, for which TensorFlow has a handy helper function `tf.nn.nce_loss()`.
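As a rough sketch of how `tf.nn.nce_loss()` is typically wired up (the weight, bias, embedding, and label tensors below are hypothetical stand-ins; the tutorial's real code builds them from the skip-gram batches):

```python
import tensorflow as tf

vocabulary_size, embedding_size, batch_size, num_noise = 50000, 128, 16, 64

nce_weights = tf.Variable(tf.zeros([vocabulary_size, embedding_size]))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
embed = tf.zeros([batch_size, embedding_size])             # embedded input words
train_labels = tf.zeros([batch_size, 1], dtype=tf.int64)   # target word ids

# Arguments: (weights, biases, inputs, labels, num_sampled, num_classes);
# num_noise plays the role of k, the number of noise words drawn per example.
loss = tf.reduce_mean(
    tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
                   num_noise, vocabulary_size))
```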
@@ -198,21 +198,21 @@ dataset, but we typically optimize this with
where typically `16 <= batch_size <= 512`). So let's look at one step of
this process.
-Let's imagine at training step \(t\) we observe the first training case above,
+Let's imagine at training step \\(t\\) we observe the first training case above,
where the goal is to predict `the` from `quick`. We select `num_noise` number
of noisy (contrastive) examples by drawing from some noise distribution,
-typically the unigram distribution, \(P(w)\). For simplicity let's say
+typically the unigram distribution, \\(P(w)\\). For simplicity let's say
`num_noise=1` and we select `sheep` as a noisy example. Next we compute the
loss for this pair of observed and noisy examples, i.e. the objective at time
-step \(t\) becomes
+step \\(t\\) becomes
$$J^{(t)}_\text{NEG} = \log Q_\theta(D=1 | \text{the, quick}) +
\log(Q_\theta(D=0 | \text{sheep, quick}))$$.
-The goal is to make an update to the embedding parameters \(\theta\) to improve
+The goal is to make an update to the embedding parameters \\(\theta\\) to improve
(in this case, maximize) this objective function. We do this by deriving the
-gradient of the loss with respect to the embedding parameters \(\theta\), i.e.
-\(\frac{\partial}{\partial \theta} J_\text{NEG}\) (luckily TensorFlow provides
+gradient of the loss with respect to the embedding parameters \\(\theta\\), i.e.
+\\(\frac{\partial}{\partial \theta} J_\text{NEG}\\) (luckily TensorFlow provides
easy helper functions for doing this!). We then perform an update to the
embeddings by taking a small step in the direction of the gradient. When this
process is repeated over the entire training set, this has the effect of
diff --git a/tensorflow/python/framework/docs.py b/tensorflow/python/framework/docs.py
index 99d02a5380..21104bff63 100644
--- a/tensorflow/python/framework/docs.py
+++ b/tensorflow/python/framework/docs.py
@@ -338,10 +338,10 @@ class Library(Document):
def _print_function(self, f, prefix, fullname, func):
"""Prints the given function to `f`."""
- heading = prefix + " " + fullname
+ heading = prefix + " `" + fullname
if not isinstance(func, property):
heading += self._generate_signature_for_function(func)
- heading += " {#%s}" % _get_anchor(self._module_to_name, fullname)
+ heading += "` {#%s}" % _get_anchor(self._module_to_name, fullname)
print >>f, heading
print >>f, ""
self._print_formatted_docstring(inspect.getdoc(func), f)
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index addcb1f850..d5e6e3bcd9 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -434,9 +434,9 @@ def convert_to_tensor(value, dtype=None, name=None):
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
- value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]))
+ value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
- value_3 = my_func(numpy.array([[1.0, 2.0], [3.0, 4.0]], dtype=numpy.float32))
+ value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
diff --git a/tensorflow/python/training/input.py b/tensorflow/python/training/input.py
index 6054d2bdfc..0a383efcf9 100644
--- a/tensorflow/python/training/input.py
+++ b/tensorflow/python/training/input.py
@@ -275,35 +275,34 @@ def _enqueue(queue, tensor_list, threads, enqueue_many):
def batch(tensor_list, batch_size, num_threads=1, capacity=32,
enqueue_many=False, shapes=None, name=None):
- """Run tensor_list to fill a queue to create batches.
+ """Creates batches of tensors in `tensor_list`.
- Implemented using a queue -- a QueueRunner for the queue
- is added to the current Graph's QUEUE_RUNNER collection.
+ This function is implemented using a queue. A `QueueRunner` for the
+ queue is added to the current `Graph`'s `QUEUE_RUNNER` collection.
+
+ If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
+ single example. An input tensor with shape `[x, y, z]` will be output
+ as a tensor with shape `[batch_size, x, y, z]`.
+
+ If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
+ batch of examples, where the first dimension is indexed by example,
+ and all members of `tensor_list` should have the same size in the
+ first dimension. If an input tensor has shape `[*, x, y, z]`, the
+ output will have shape `[batch_size, x, y, z]`. The `capacity` argument
+ controls how long the prefetching is allowed to grow the queues.
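+
+ For example (a sketch assuming hypothetical `single_image` and `single_label`
+ tensors produced elsewhere by a reader):
+
+ ```python
+ # Creates batches of 32 images and 32 labels.
+ image_batch, label_batch = tf.train.batch(
+ [single_image, single_label],
+ batch_size=32,
+ num_threads=4,
+ capacity=300)
+ ```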
Args:
tensor_list: The list of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
- num_threads: The number of threads enqueuing tensor_list.
- capacity: Maximum number of elements in the queue, controls the
- how far ahead the prefetching allowed is allowed to get and
- memory usage.
- enqueue_many: If False, tensor_list is assumed to represent a
- single example. If True, tensor_list is assumed to represent
- a batch of examples, where the first dimension is indexed by
- example, and all members of tensor_list should have the same
- size in the first dimension.
- shapes: Optional. The shapes for each example. Defaults to the
- inferred shapes for tensor_list (leaving off the first dimension
- if enqueue_many is True).
- name: A name for the operations (optional).
+ num_threads: The number of threads enqueuing `tensor_list`.
+ capacity: An integer. The maximum number of elements in the queue.
+ enqueue_many: Whether each tensor in `tensor_list` is a single example.
+ shapes: (Optional) The shapes for each example. Defaults to the
+ inferred shapes for `tensor_list`.
+ name: (Optional) A name for the operations.
Returns:
- A list of tensors with the same number and types as tensor_list.
- If enqueue_many is false, then an input tensor with shape
- `[x, y, z]` will be output as a tensor with shape
- `[batch_size, x, y, z]`. If enqueue_many is True, and an
- input tensor has shape `[*, x, y, z]`, the the output will have
- shape `[batch_size, x, y, z]`.
+ A list of tensors with the same number and types as `tensor_list`.
"""
with ops.op_scope(tensor_list, name, "batch") as name:
tensor_list = _validate(tensor_list)
@@ -327,30 +326,31 @@ def batch(tensor_list, batch_size, num_threads=1, capacity=32,
# Once this is done, batch() can be written as a call to batch_join().
def batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False,
shapes=None, name=None):
- """Run a list of tensors to fill a queue to create batches of examples.
+ """Runs a list of tensors to fill a queue to create batches of examples.
Enqueues a different list of tensors in different threads.
Implemented using a queue -- a `QueueRunner` for the queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
- `len(tensor_list_list)` threads will be started, with thread `i` enqueuing
- the tensors from tensor_list[i]. `tensor_list[i1][j]` must match
- `tensor_list[i2][j]` in type and shape, except in the first dimension if
- `enqueue_many` is true.
+ `len(tensor_list_list)` threads will be started,
+ with thread `i` enqueuing the tensors from
+ `tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
+ `tensor_list_list[i2][j]` in type and shape, except in the first
+ dimension if `enqueue_many` is true.
- If `enqueue_many` is false, each `tensor_list_list[i]` is assumed to
- represent a single example. Otherwise, `tensor_list_list[i]` is assumed to
- represent a batch of examples, where the first dimension is indexed by
- example, and all members of `tensor_list_list[i]` should have the same size
- in the first dimension.
+ If `enqueue_many` is `False`, each `tensor_list_list[i]` is assumed
+ to represent a single example. An input tensor `x` will be output as a
+ tensor with shape `[batch_size] + x.shape`.
- If `enqueue_many` is false, then an input tensor `x` will be output as a
- tensor with shape `[batch_size] + x.shape`. If `enqueue_many` is true, the
- slices of any input tensor `x` are treated as examples, and the output tensors
- will have shape `[batch_size] + x.shape[1:]`.
+ If `enqueue_many` is `True`, `tensor_list_list[i]` is assumed to
+ represent a batch of examples, where the first dimension is indexed
+ by example, and all members of `tensor_list_list[i]` should have the
+ same size in the first dimension. The slices of any input tensor
+ `x` are treated as examples, and the output tensors will have shape
+ `[batch_size] + x.shape[1:]`.
- The `capacity` argument controls the how long the prefetching
- is allowed to grow the queues.
+ The `capacity` argument controls how long the prefetching is allowed to
+ grow the queues.
Args:
tensor_list_list: A list of tuples of tensors to enqueue.
@@ -360,7 +360,7 @@ def batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False,
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
- name: A name for the operations (optional).
+ name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as
@@ -383,42 +383,55 @@ def batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False,
def shuffle_batch(tensor_list, batch_size, capacity, min_after_dequeue,
num_threads=1, seed=None, enqueue_many=False, shapes=None,
name=None):
- """Create batches by randomly shuffling tensors.
+ """Creates batches by randomly shuffling tensors.
+
+ This function adds the following to the current `Graph`:
+
+ * A shuffling queue into which tensors from `tensor_list` are enqueued.
+ * A `dequeue_many` operation to create batches from the queue.
+ * A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
+ from `tensor_list`.
+
+ If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
+ single example. An input tensor with shape `[x, y, z]` will be output
+ as a tensor with shape `[batch_size, x, y, z]`.
- This adds:
+ If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
+ batch of examples, where the first dimension is indexed by example,
+ and all members of `tensor_list` should have the same size in the
+ first dimension. If an input tensor has shape `[*, x, y, z]`, the
+ output will have shape `[batch_size, x, y, z]`.
- * a shuffling queue into which tensors from tensor_list are enqueued.
- * a dequeue many operation to create batches from the queue,
- * and a QueueRunner is added to the current Graph's QUEUE_RUNNER collection,
- to enqueue the tensors from tensor_list.
+ The `capacity` argument controls how long the prefetching is allowed to
+ grow the queues.
+
+ For example:
+
+ ```python
+ # Creates batches of 32 images and 32 labels.
+ image_batch, label_batch = tf.train.shuffle_batch(
+ [single_image, single_label],
+ batch_size=32,
+ num_threads=4,
+ capacity=50000,
+ min_after_dequeue=10000)
+ ```
Args:
tensor_list: The list of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
- capacity: Maximum number of elements in the queue, controls the
- how far ahead the prefetching allowed is allowed to get and
- memory usage.
+ capacity: An integer. The maximum number of elements in the queue.
min_after_dequeue: Minimum number of elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
- num_threads: The number of threads enqueuing tensor_list.
+ num_threads: The number of threads enqueuing `tensor_list`.
seed: Seed for the random shuffling within the queue.
- enqueue_many: If False, tensor_list is assumed to represent a
- single example. If True, tensor_list is assumed to represent
- a batch of examples, where the first dimension is indexed by
- example, and all members of tensor_list should have the same
- size in the first dimension.
- shapes: Optional. The shapes for each example. Defaults to the
- inferred shapes for tensor_list (leaving off the first dimension
- if enqueue_many is True).
- name: A name for the operations (optional).
+ enqueue_many: Whether each tensor in `tensor_list` is a single example.
+ shapes: (Optional) The shapes for each example. Defaults to the
+ inferred shapes for `tensor_list`.
+ name: (Optional) A name for the operations.
Returns:
- A list of tensors with the same number and types as tensor_list.
- If enqueue_many is false, then an input tensor with shape
- `[x, y, z]` will be output as a tensor with shape
- `[batch_size, x, y, z]`. If enqueue_many is True, and an
- input tensor has shape `[*, x, y, z]`, the the output will have
- shape `[batch_size, x, y, z]`.
+ A list of tensors with the same number and types as `tensor_list`.
"""
with ops.op_scope(tensor_list, name, "shuffle_batch") as name:
tensor_list = _validate(tensor_list)
@@ -446,44 +459,46 @@ def shuffle_batch_join(tensor_list_list, batch_size, capacity,
"""Create batches by randomly shuffling tensors.
This version enqueues a different list of tensors in different threads.
- It adds:
+ It adds the following to the current `Graph`:
+
+ * A shuffling queue into which tensors from `tensor_list_list` are enqueued.
+ * A `dequeue_many` operation to create batches from the queue.
+ * A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
+ from `tensor_list_list`.
+
+ `len(tensor_list_list)` threads will be started, with thread `i` enqueuing
+ the tensors from `tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
+ `tensor_list_list[i2][j]` in type and shape, except in the first dimension if
+ `enqueue_many` is true.
+
+ If `enqueue_many` is `False`, each `tensor_list_list[i]` is assumed
+ to represent a single example. An input tensor with shape `[x, y,
+ z]` will be output as a tensor with shape `[batch_size, x, y, z]`.
- * a shuffling queue into which tensors from tensor_list_list are enqueued.
- * a dequeue many operation to create batches from the queue,
- * and a QueueRunner is added to the current Graph's QUEUE_RUNNER collection,
- to enqueue the tensors from tensor_list_list.
+ If `enqueue_many` is `True`, `tensor_list_list[i]` is assumed to
+ represent a batch of examples, where the first dimension is indexed
+ by example, and all members of `tensor_list_list[i]` should have the
+ same size in the first dimension. If an input tensor has shape `[*, x,
+ y, z]`, the output will have shape `[batch_size, x, y, z]`.
+
+ The `capacity` argument controls how long the prefetching is allowed to
+ grow the queues.
Args:
tensor_list_list: A list of tuples of tensors to enqueue.
- len(tensor_list_list) threads will be started, with the i-th
- thread enqueuing the tensors from tensor_list[i].
- tensor_list[i1][j] must match tensor_list[i2][j] in type and
- shape (except in the first dimension if enqueue_many is true).
- batch_size: The new batch size pulled from the queue.
- capacity: Maximum number of elements in the queue, controls the
- how far ahead the prefetching allowed is allowed to get and
- memory usage.
+ batch_size: An integer. The new batch size pulled from the queue.
+ capacity: An integer. The maximum number of elements in the queue.
min_after_dequeue: Minimum number of elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
seed: Seed for the random shuffling within the queue.
- enqueue_many: If `False`, each tensor_list_list[i] is assumed to
- represent a single example. If `True`, tensor_list_list[i] is
- assumed to represent a batch of examples, where the first
- dimension is indexed by example, and all members of
- tensor_list_list[i] should have the same size in the first
- dimension.
- shapes: Optional. The shapes for each example. Defaults to the
- inferred shapes for `tensor_list_list[i]` (which must match, after
- leaving off the first dimension if enqueue_many is `True`).
- name: A name for the operations (optional).
+ enqueue_many: Whether each tensor in `tensor_list_list` is a single
+ example.
+ shapes: (Optional) The shapes for each example. Defaults to the
+ inferred shapes for `tensor_list_list[i]`.
+ name: (Optional) A name for the operations.
Returns:
- A list of tensors with the same number and types as
- tensor_list_list[i]. If enqueue_many is false, then an input
- tensor with shape `[x, y, z]` will be output as a tensor with
- shape `[batch_size, x, y, z]`. If enqueue_many is True, and an
- input tensor has shape `[*, x, y, z]`, the the output will have
- shape `[batch_size, x, y, z]`.
+ A list of tensors with the same number and types as `tensor_list_list[i]`.
"""
with ops.op_scope(
_flatten(tensor_list_list), name, "shuffle_batch_join") as name:
diff --git a/tensorflow/tools/docker/Dockerfile b/tensorflow/tools/docker/Dockerfile
deleted file mode 100644
index 02d8837858..0000000000
--- a/tensorflow/tools/docker/Dockerfile
+++ /dev/null
@@ -1,100 +0,0 @@
-FROM ipython/notebook:latest
-
-MAINTAINER Craig Citro <craigcitro@google.com>
-
-# Set up Bazel.
-# Install dependencies for bazel.
-RUN apt-get update && apt-get install -y \
- pkg-config \
- zip \
- g++ \
- zlib1g-dev \
- unzip \
- swig \
- software-properties-common \
- wget
-
-# We need to add a custom PPA to pick up JDK8, since trusty doesn't
-# have an openjdk8 backport. openjdk-r is maintained by a reliable contributor:
-# Matthias Klose (https://launchpad.net/~doko). It will do until
-# we either update the base image beyond 14.04 or openjdk-8 is
-# finally backported to trusty; see e.g.
-# https://bugs.launchpad.net/trusty-backports/+bug/1368094
-RUN add-apt-repository -y ppa:openjdk-r/ppa && \
- apt-get update && \
- apt-get install -y openjdk-8-jdk openjdk-8-jre-headless
-
-# Set up CUDA variables and symlinks
-COPY cuda /usr/local/cuda
-ENV CUDA_PATH /usr/local/cuda
-ENV LD_LIBRARY_PATH /usr/local/cuda/lib64
-RUN ln -s libcuda.so.1 /usr/lib/x86_64-linux-gnu/libcuda.so
-
-# Running bazel inside a `docker build` command causes trouble, cf:
-# https://github.com/bazelbuild/bazel/issues/134
-# The easiest solution is to set up a bazelrc file forcing --batch.
-RUN echo "startup --batch" >>/root/.bazelrc
-# Similarly, we need to workaround sandboxing issues:
-# https://github.com/bazelbuild/bazel/issues/418
-RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
- >>/root/.bazelrc
-ENV BAZELRC /root/.bazelrc
-# Install the most recent bazel release.
-ENV BAZEL_VERSION 0.1.1
-WORKDIR /
-RUN mkdir /bazel && \
- cd /bazel && \
- wget https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
- wget -O /bazel/LICENSE.txt https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE.txt
- chmod +x bazel-*.sh && \
- ./bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
- cd / && \
- rm -f /bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh
-
-# Download and build TensorFlow.
-WORKDIR /tensorflow
-# Pick up some TF dependencies
-RUN apt-get update && \
- apt-get install -y python-numpy && \
- apt-get install -y libfreetype6-dev
-
-# We can't clone the TF git repo yet, because of permissions issues.
-# RUN git clone https://tensorflow.googlesource.com/
-# Instead, we manually copy it in:
-COPY tensorflow /tensorflow
-
-# Set up the CUDA tensorflow directories
-RUN rm -rf /tensorflow/third_party/gpus/cuda/lib64
-RUN rm -rf /tensorflow/third_party/gpus/cuda/bin
-RUN rm -rf /tensorflow/third_party/gpus/cuda/include
-RUN rm -rf /tensorflow/third_party/gpus/cuda/nvvm
-RUN ln -s /usr/local/cuda/lib64 /tensorflow/third_party/gpus/cuda/
-RUN ln -s /usr/local/cuda/bin /tensorflow/third_party/gpus/cuda/
-RUN ln -s /usr/local/cuda/include /tensorflow/third_party/gpus/cuda/
-RUN ln -s /usr/local/cuda/nvvm /tensorflow/third_party/gpus/cuda/
-
-# Now we build
-RUN bazel clean && \
- bazel build -c opt --config=cuda tensorflow/tools/docker:simple_console
-
-ENV PYTHONPATH=/tensorflow/bazel-bin/tensorflow/tools/docker/simple_console.runfiles/:$PYTHONPATH
-
-# Add any notebooks in this directory.
-COPY notebooks /notebooks
-
-# Add variables for the local IPython. This sets a fixed password and
-# switches to HTTP (to avoid self-signed certificate warnings in
-# Chrome).
-ENV PASSWORD=JustForNow
-ENV USE_HTTP=1
-
-RUN if [ -f /notebooks/requirements.txt ];\
- then pip install -r /notebooks/requirements.txt;\
- fi
-
-# Set the workdir so we see notebooks on the IPython landing page.
-WORKDIR /notebooks
-
-# Remove CUDA libraries, headers, nvcc. The user will have to
-# provide this directly when running docker.
-RUN rm -rf /usr/local/cuda
diff --git a/tensorflow/tools/docker/README.md b/tensorflow/tools/docker/README.md
deleted file mode 100644
index 1d64b7ea1b..0000000000
--- a/tensorflow/tools/docker/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# Using TensorFlow via Docker
-
-This directory contains `Dockerfile`s to make it easy to get up and running with
-TensorFlow via [Docker](http://www.docker.com/).
-
-## Installing Docker
-
-General installation instructions are
-[on the Docker site](https://docs.docker.com/installation/), but we give some
-quick links here:
-
-* [OSX](https://docs.docker.com/installation/mac/): [docker toolbox](https://www.docker.com/toolbox)
-* [ubuntu](https://docs.docker.com/installation/ubuntulinux/)
-
-## Running the container
-
-Before you build your container, you can add notebooks you need
-to a subdirectory of your working directory `notebooks/` and any python
-libraries you need for them to `notebooks/requirements.txt` to have them
-installed with `pip`.
-
-To build a container image from this `Dockerfile`, just run
-
- $ docker build -t $USER/tensorflow_docker .
-
-This will create a new container from the description, and print out an
-identifying hash. You can then run this container locally:
-
- $ docker run -p 8888:8888 -it $USER/tensorflow_docker
-
-This will start the container (inside a VM locally), and expose the running
-IPython endpoint locally on port 8888. (The `-it` flags keep stdin connected to
-a tty in the container, which is helpful when you want to stop the server;
-`docker help run` explains all the possibilities.)
-
-**NOTE**: If you want to be able to add data to your IPython Notebook while it's
-running you can do this in a subdirectory of the /notebook volume as follows:
-
- $ docker run -p 8888:8888 -it -v ./notebook/data:/notebook/data \
- $USER/tensorflow_docker
-
-**Caveat**: Note that `docker build` uses the first positional argument as the
-*context* for the build; in particular, it starts by collecting all files in
-that directory and shipping them to the docker daemon to build the image itself.
-This means you shouldn't use the `-f` flag to use this Dockerfile from a
-different directory, or you'll end up copying around more files than you'd like.
-So:
-
- # ok
- $ docker build . # inside tools/docker
- $ docker build path/to/tools/docker # further up the tree
- # bad
- $ docker build -f tools/docker/Dockerfile . # will pick up all files in .
-
-## Experimenting in the container:
-
-When the container starts up, it launches an IPython notebook server, populated
-with several "Getting Started with TensorFlow" notebooks.
-
-# TODO
-
-* Decide how much of this is handled by the native
- [docker support in bazel](http://bazel.io/blog/2015/07/28/docker_build.html).