author Billy Lamberta <blamb@google.com> 2018-07-24 11:52:23 -0700
committer TensorFlower Gardener <gardener@tensorflow.org> 2018-07-24 11:58:53 -0700
commit ff2aa1b59d4a111af094c0c7724e453eefe1f3b7 (patch)
tree cd13149671e53a3b28e9a2fdb012310a46de03d9
parent badf913c0a2f83ca933b8fe73a29f7dd5d2bc5ce (diff)
Setup for TFLite subsite
PiperOrigin-RevId: 205866236
-rw-r--r--  tensorflow/contrib/lite/g3doc/README.md  4
-rw-r--r--  tensorflow/contrib/lite/g3doc/_book.yaml  58
-rw-r--r--  tensorflow/contrib/lite/g3doc/_index.yaml  67
-rw-r--r--  tensorflow/contrib/lite/g3doc/_project.yaml  10
-rw-r--r--  tensorflow/contrib/lite/g3doc/api_docs/python/_toc.yaml  6
-rw-r--r--  tensorflow/contrib/lite/g3doc/api_docs/python/index.md  10
-rw-r--r--  tensorflow/contrib/lite/g3doc/apis.md  3
-rw-r--r--  tensorflow/contrib/lite/g3doc/benchmarks.md  178
-rw-r--r--  tensorflow/contrib/lite/g3doc/custom_operators.md  3
-rw-r--r--  tensorflow/contrib/lite/g3doc/demo_android.md (renamed from tensorflow/docs_src/mobile/tflite/demo_android.md)  3
-rw-r--r--  tensorflow/contrib/lite/g3doc/demo_ios.md (renamed from tensorflow/docs_src/mobile/tflite/demo_ios.md)  3
-rw-r--r--  tensorflow/contrib/lite/g3doc/devguide.md (renamed from tensorflow/docs_src/mobile/tflite/devguide.md)  27
-rw-r--r--  tensorflow/contrib/lite/g3doc/ios.md  3
-rw-r--r--  tensorflow/contrib/lite/g3doc/models.md  3
-rw-r--r--  tensorflow/contrib/lite/g3doc/ops_versioning.md  3
-rw-r--r--  tensorflow/contrib/lite/g3doc/overview.md (renamed from tensorflow/docs_src/mobile/tflite/index.md)  3
-rw-r--r--  tensorflow/contrib/lite/g3doc/performance.md (renamed from tensorflow/docs_src/mobile/tflite/performance.md)  3
-rw-r--r--  tensorflow/contrib/lite/g3doc/rpi.md  3
-rw-r--r--  tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md  3
-rw-r--r--  tensorflow/contrib/lite/g3doc/tfmobile/android_build.md (renamed from tensorflow/docs_src/mobile/android_build.md)  6
-rw-r--r--  tensorflow/contrib/lite/g3doc/tfmobile/index.md (renamed from tensorflow/docs_src/mobile/mobile_intro.md)  50
-rw-r--r--  tensorflow/contrib/lite/g3doc/tfmobile/ios_build.md (renamed from tensorflow/docs_src/mobile/ios_build.md)  3
-rw-r--r--  tensorflow/contrib/lite/g3doc/tfmobile/linking_libs.md (renamed from tensorflow/docs_src/mobile/linking_libs.md)  109
-rw-r--r--  tensorflow/contrib/lite/g3doc/tfmobile/optimizing.md (renamed from tensorflow/docs_src/mobile/optimizing.md)  11
-rw-r--r--  tensorflow/contrib/lite/g3doc/tfmobile/prepare_models.md (renamed from tensorflow/docs_src/mobile/prepare_models.md)  7
-rw-r--r--  tensorflow/docs_src/mobile/README.md  3
-rw-r--r--  tensorflow/docs_src/mobile/index.md  33
-rw-r--r--  tensorflow/docs_src/mobile/leftnav_files  15
28 files changed, 331 insertions, 299 deletions
diff --git a/tensorflow/contrib/lite/g3doc/README.md b/tensorflow/contrib/lite/g3doc/README.md
new file mode 100644
index 0000000000..e3db478481
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/README.md
@@ -0,0 +1,4 @@
+This is a *work-in-progress* TF Lite subsite for:
+https://www.tensorflow.org/mobile
+
+DO NOT PUBLISH
diff --git a/tensorflow/contrib/lite/g3doc/_book.yaml b/tensorflow/contrib/lite/g3doc/_book.yaml
new file mode 100644
index 0000000000..98abd5743b
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/_book.yaml
@@ -0,0 +1,58 @@
+upper_tabs:
+# Tabs left of dropdown menu
+- include: /_upper_tabs_left.yaml
+# Dropdown menu
+- name: Ecosystem
+ path: /ecosystem
+ is_default: True
+ menu:
+ - include: /ecosystem/_menu_toc.yaml
+ lower_tabs:
+ # Subsite tabs
+ other:
+ - name: Guide
+ contents:
+ - title: Overview
+ path: /mobile/overview
+ - title: Developer Guide
+ path: /mobile/devguide
+ - title: Android Demo App
+ path: /mobile/demo_android
+ - title: iOS Demo App
+ path: /mobile/demo_ios
+ - title: Performance
+ path: /mobile/performance
+ - break: True
+ - title: TensorFlow Lite APIs
+ path: /mobile/apis
+ - title: Custom operators
+ path: /mobile/custom_operators
+ - title: TensorFlow Lite Ops Versioning
+ path: /mobile/ops_versioning
+ - title: TensorFlow Lite Compatibility Guide
+ path: /mobile/tf_ops_compatibility
+ - title: List of Hosted Models
+ path: /mobile/models
+ - title: TensorFlow Lite for iOS
+ path: /mobile/ios
+ - title: TensorFlow Lite for Raspberry Pi
+ path: /mobile/rpi
+
+ - heading: TF Mobile
+ status: deprecated
+ - title: Overview
+ path: /mobile/tfmobile/
+ - title: Building TensorFlow on Android
+ path: /mobile/tfmobile/android_build
+ - title: Building TensorFlow on iOS
+ path: /mobile/tfmobile/ios_build
+ - title: Integrating TensorFlow libraries
+ path: /mobile/tfmobile/linking_libs
+ - title: Preparing models for mobile deployment
+ path: /mobile/tfmobile/prepare_models
+ - title: Optimizing for mobile
+ path: /mobile/tfmobile/optimizing
+
+ - name: API
+ contents:
+ - include: /mobile/api_docs/python/_toc.yaml
diff --git a/tensorflow/contrib/lite/g3doc/_index.yaml b/tensorflow/contrib/lite/g3doc/_index.yaml
new file mode 100644
index 0000000000..9119e49117
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/_index.yaml
@@ -0,0 +1,67 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+description: <!--no description-->
+landing_page:
+ rows:
+ - heading: TensorFlow Lite is a lightweight solution for mobile and embedded devices.
+ items:
+ - description: >
+ TensorFlow Lite is TensorFlow’s lightweight solution for mobile and
+ embedded devices. It enables on-device machine learning inference with
+ low latency and a small binary size. TensorFlow Lite also supports
+ hardware acceleration with the
+ <a href='https://developer.android.com/ndk/guides/neuralnetworks/index.html'>Android Neural Networks API</a>.
+ list:
+ - heading: Key point 1
+ description: >
+ [high-level overview]
+ icon:
+ icon_name: chevron_right
+ foreground: theme
+ background: grey
+ - heading: Key point 2
+ description: >
+ [high-level overview]
+ icon:
+ icon_name: chevron_right
+ foreground: theme
+ background: grey
+ - heading: Key point 3
+ description: >
+ [high-level overview]
+ icon:
+ icon_name: chevron_right
+ foreground: theme
+ background: grey
+ - code_block: |
+ <pre class="prettyprint">
+ $ toco --input_file=$(pwd)/mobilenet_v1_1.0_224/frozen_graph.pb \
+ --input_format=TENSORFLOW_GRAPHDEF \
+ --output_format=TFLITE \
+ --output_file=/tmp/mobilenet_v1_1.0_224.tflite \
+ --inference_type=FLOAT \
+ --input_type=FLOAT \
+ --input_arrays=input \
+ --output_arrays=MobilenetV1/Predictions/Reshape_1 \
+ --input_shapes=1,224,224,3
+ </pre>
+
+ - classname: devsite-landing-row-cards
+ items:
+ - heading: Using TensorFlow Lite on Android
+ image_path: /ecosystem/images/tf-logo-card-16x9.png
+ path: https://medium.com/tensorflow/using-tensorflow-lite-on-android-9bbc9cb7d69d
+ buttons:
+ - label: Read on TensorFlow blog
+ path: https://medium.com/tensorflow/using-tensorflow-lite-on-android-9bbc9cb7d69d
+ - heading: TensorFlow Lite at the Dev Summit
+ youtube_id: FAMfy7izB6A
+ buttons:
+ - label: Watch the video
+ path: https://www.youtube.com/watch?v=FAMfy7izB6A
+ - heading: TensorFlow Lite on GitHub
+ image_path: /ecosystem/images/github-card-16x9.png
+ path: https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite
+ buttons:
+ - label: View on GitHub
+ path: https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite
diff --git a/tensorflow/contrib/lite/g3doc/_project.yaml b/tensorflow/contrib/lite/g3doc/_project.yaml
new file mode 100644
index 0000000000..b39666516b
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/_project.yaml
@@ -0,0 +1,10 @@
+name: TensorFlow Lite
+breadcrumb_name: Mobile
+home_url: /mobile/
+parent_project_metadata_path: /_project.yaml
+description: >
+ TensorFlow Lite is a lightweight solution for mobile and embedded devices.
+use_site_branding: True
+hide_from_products_list: True
+content_license: cc3-apache2
+buganizer_id: 316308
diff --git a/tensorflow/contrib/lite/g3doc/api_docs/python/_toc.yaml b/tensorflow/contrib/lite/g3doc/api_docs/python/_toc.yaml
new file mode 100644
index 0000000000..1e1c44c692
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/api_docs/python/_toc.yaml
@@ -0,0 +1,6 @@
+# Automatically generated file; please do not edit
+toc:
+ - title: TensorFlow Lite
+ section:
+ - title: Overview
+ path: /mobile/api_docs/python/
diff --git a/tensorflow/contrib/lite/g3doc/api_docs/python/index.md b/tensorflow/contrib/lite/g3doc/api_docs/python/index.md
new file mode 100644
index 0000000000..70031a3c3d
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/api_docs/python/index.md
@@ -0,0 +1,10 @@
+Project: /mobile/_project.yaml
+Book: /mobile/_book.yaml
+page_type: reference
+<style> table img { max-width: 100%; } </style>
+<script src="/_static/js/managed/mathjax/MathJax.js?config=TeX-AMS-MML_SVG"></script>
+
+<!-- DO NOT EDIT! Automatically generated file. -->
+# All symbols in TensorFlow Lite
+
+TEMP PAGE
diff --git a/tensorflow/contrib/lite/g3doc/apis.md b/tensorflow/contrib/lite/g3doc/apis.md
index e94a2cc44e..776803da8c 100644
--- a/tensorflow/contrib/lite/g3doc/apis.md
+++ b/tensorflow/contrib/lite/g3doc/apis.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# TensorFlow Lite APIs
TensorFlow Lite provides programming APIs in C++ and Java, and in both cases
diff --git a/tensorflow/contrib/lite/g3doc/benchmarks.md b/tensorflow/contrib/lite/g3doc/benchmarks.md
deleted file mode 100644
index 96536cba27..0000000000
--- a/tensorflow/contrib/lite/g3doc/benchmarks.md
+++ /dev/null
@@ -1,178 +0,0 @@
-# Performance Benchmark numbers
-
-This document contains the performance benchmark numbers for running a few well
-known models on some Android and iOS devices.
-
-The benchmark numbers were generated by running the [TFLite benchmark
-binary](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark)
-on Android and running the [iOS benchmark
-app](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark/ios)
-on iOS.
-
-# Android benchmarks
-
-When running Android benchmarks, the CPU affinity is set to use big cores on the
-device to reduce variance (see
-[details](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark#reducing-variance-between-runs-on-android)).
-
-Models are assumed to have been downloaded from the link, unzipped and pushed to
-`/data/local/tmp/tflite_models` folder. The benchmark binary is built according
-to instructions listed
-[here](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark#on-android)
-and is assumed to have been pushed to `/data/local/tmp`.
-
-The following command was used to run the benchmark:
-
-```
-adb shell taskset ${CPU_MASK} /data/local/tmp/benchmark_model \
- --num_threads=1 \
- --graph=/data/local/tmp/tflite_models/${GRAPH} \
- --warmup_runs=1 \
- --num_runs=50 \
- --use_nnapi=false
-```
-
-where `${GRAPH}` is the name of model and `${CPU_MASK}` is the CPU affinity
-chosen according to the following table:
-
-Device | CPU_MASK |
--------| ----------
-Pixel 2 | f0 |
-Pixel xl | 0c |
-
-
-<table>
- <thead>
- <tr>
- <th>Model Name</th>
- <th>Device </th>
- <th>Mean inference time (std dev)</th>
- </tr>
- </thead>
- <tr>
- <td rowspan = 2>
- <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz">Mobilenet_1.0_224(float)</a>
- </td>
- <td>Pixel 2 </td>
- <td>166.5 ms (2.6 ms)</td>
- </tr>
- <tr>
- <td>Pixel xl </td>
- <td>122.9 ms (1.8 ms) </td>
- </tr>
- <tr>
- <td rowspan = 2>
- <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz">Mobilenet_1.0_224 (quant)</a>
- </td>
- <td>Pixel 2 </td>
- <td>69.5 ms (0.9 ms)</td>
- </tr>
- <tr>
- <td>Pixel xl </td>
- <td>78.9 ms (2.2 ms) </td>
- </tr>
- <tr>
- <td rowspan = 2>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz">NASNet mobile</a>
- </td>
- <td>Pixel 2 </td>
- <td>273.8 ms (3.5 ms)</td>
- </tr>
- <tr>
- <td>Pixel xl </td>
- <td>210.8 ms (4.2 ms)</td>
- </tr>
- <tr>
- <td rowspan = 2>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz">SqueezeNet</a>
- </td>
- <td>Pixel 2 </td>
- <td>234.0 ms (2.1 ms)</td>
- </tr>
- <tr>
- <td>Pixel xl </td>
- <td>158.0 ms (2.1 ms)</td>
- </tr>
- <tr>
- <td rowspan = 2>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz">Inception_ResNet_V2</a>
- </td>
- <td>Pixel 2 </td>
- <td>2846.0 ms (15.0 ms)</td>
- </tr>
- <tr>
- <td>Pixel xl </td>
- <td>1973.0 ms (15.0 ms) </td>
- </tr>
- <tr>
- <td rowspan = 2>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz">Inception_V4</a>
- </td>
- <td>Pixel 2 </td>
- <td>3180.0 ms (11.7 ms)</td>
- </tr>
- <tr>
- <td>Pixel xl </td>
- <td>2262.0 ms (21.0 ms) </td>
- </tr>
-
- </table>
-
-# iOS benchmarks
-
-For running iOS benchmarks, the [benchmark
-app](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark/ios)
-was modified to include the appropriate model and `benchmark_params.json` was
-modified to set `num_threads` to 1.
-
-<table>
- <thead>
- <tr>
- <th>Model Name</th>
- <th>Device </th>
- <th>Mean inference time (std dev)</th>
- </tr>
- </thead>
- <tr>
- <td>
- <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz">Mobilenet_1.0_224(float)</a>
- </td>
- <td>iPhone 8 </td>
- <td>32.2 ms (0.8 ms)</td>
- </tr>
- <tr>
- <td>
- <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz)">Mobilenet_1.0_224 (quant)</a>
- </td>
- <td>iPhone 8 </td>
- <td>24.4 ms (0.8 ms)</td>
- </tr>
- <tr>
- <td>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz">NASNet mobile</a>
- </td>
- <td>iPhone 8 </td>
- <td>60.3 ms (0.6 ms)</td>
- </tr>
- <tr>
- <td>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz">SqueezeNet</a>
- </td>
- <td>iPhone 8 </td>
- <td>44.3 (0.7 ms)</td>
- </tr>
- <tr>
- <td>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz">Inception_ResNet_V2</a>
- </td>
- <td>iPhone 8</td>
- <td>562.4 ms (18.2 ms)</td>
- </tr>
- <tr>
- <td>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz">Inception_V4</a>
- </td>
- <td>iPhone 8 </td>
- <td>661.0 ms (29.2 ms)</td>
- </tr>
- </table>
diff --git a/tensorflow/contrib/lite/g3doc/custom_operators.md b/tensorflow/contrib/lite/g3doc/custom_operators.md
index f2fbcf64cf..2296f5a064 100644
--- a/tensorflow/contrib/lite/g3doc/custom_operators.md
+++ b/tensorflow/contrib/lite/g3doc/custom_operators.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# How to use custom operators
TensorFlow Lite currently supports a subset of TensorFlow operators. However, it
diff --git a/tensorflow/docs_src/mobile/tflite/demo_android.md b/tensorflow/contrib/lite/g3doc/demo_android.md
index fdf0bcf3c1..d79a2696b4 100644
--- a/tensorflow/docs_src/mobile/tflite/demo_android.md
+++ b/tensorflow/contrib/lite/g3doc/demo_android.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Android Demo App
An example Android application using TensorFlow Lite is available
diff --git a/tensorflow/docs_src/mobile/tflite/demo_ios.md b/tensorflow/contrib/lite/g3doc/demo_ios.md
index 3be21da89f..a554898899 100644
--- a/tensorflow/docs_src/mobile/tflite/demo_ios.md
+++ b/tensorflow/contrib/lite/g3doc/demo_ios.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# iOS Demo App
The TensorFlow Lite demo is a camera app that continuously classifies whatever
diff --git a/tensorflow/docs_src/mobile/tflite/devguide.md b/tensorflow/contrib/lite/g3doc/devguide.md
index b168d6c183..dc9cc98c08 100644
--- a/tensorflow/docs_src/mobile/tflite/devguide.md
+++ b/tensorflow/contrib/lite/g3doc/devguide.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Developer Guide
Using a TensorFlow Lite model in your mobile app requires multiple
@@ -56,7 +59,7 @@ both floating point and quantized inference.
A developer may choose to train a custom model using TensorFlow (see the
[TensorFlow tutorials](../../tutorials/) for examples of building and training
models). If you have already written a model, the first step is to export this
-to a @{tf.GraphDef} file. This is required because some formats do not store the
+to a `tf.GraphDef` file. This is required because some formats do not store the
model structure outside the code, and we must communicate with other parts of the
framework. See
[Exporting the Inference Graph](https://github.com/tensorflow/models/blob/master/research/slim/README.md)
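+
+As an illustration, a graph built in a training script can be written out with
+a few lines of Python (a minimal sketch; the placeholder model and output path
+are assumptions, not part of any particular tutorial):
+
+```
+import tensorflow as tf
+
+# Build (or load) the model so its ops are recorded in the default graph.
+x = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name="input")
+y = tf.identity(x, name="output")
+
+with tf.Session() as sess:
+    # Serialize the graph structure as a binary GraphDef protobuf.
+    tf.train.write_graph(sess.graph_def, "/tmp", "graph.pb", as_text=False)
+```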
@@ -71,12 +74,12 @@ grow in future Tensorflow Lite releases.
## 2. Convert the model format
The model generated (or downloaded) in the previous step is a *standard*
-Tensorflow model and you should now have a .pb or .pbtxt @{tf.GraphDef} file.
+TensorFlow model and you should now have a .pb or .pbtxt `tf.GraphDef` file.
Models generated with transfer learning (re-training) or custom models must be
converted, but first we must freeze the graph to convert the model to the
TensorFlow Lite format. This process uses several model formats:
-* @{tf.GraphDef} (.pb) —A protobuf that represents the TensorFlow training or
+* `tf.GraphDef` (.pb) —A protobuf that represents the TensorFlow training or
computation graph. It contains operators, tensors, and variables definitions.
* *CheckPoint* (.ckpt) —Serialized variables from a TensorFlow graph. Since this
does not contain a graph structure, it cannot be interpreted by itself.
@@ -143,11 +146,11 @@ containing the model architecture. The [frozen_graph.pb](https://storage.googlea
file used here is available for download. `output_file` is where the TensorFlow
Lite model will get generated. The `input_type` and `inference_type`
arguments should be set to `FLOAT`, unless converting a
-@{$performance/quantization$quantized model}. Setting the `input_array`,
-`output_array`, and `input_shape` arguments are not as straightforward. The
-easiest way to find these values is to explore the graph using Tensorboard. Reuse
-the arguments for specifying the output nodes for inference in the
-`freeze_graph` step.
+<a href="https://www.tensorflow.org/performance/quantization">quantized model</a>.
+Setting the `input_array`, `output_array`, and `input_shape` arguments is not as
+straightforward. The easiest way to find these values is to explore the graph
+using TensorBoard. Reuse the arguments for specifying the output nodes for
+inference in the `freeze_graph` step.
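+
+One way to find candidate names without TensorBoard is to load the frozen
+graph in Python and print its nodes (a minimal sketch; the graph path is an
+assumption):
+
+```
+import tensorflow as tf
+
+# Read the frozen GraphDef and list node names and ops; the first and last
+# entries are usually good candidates for input_arrays and output_arrays.
+graph_def = tf.GraphDef()
+with open("/tmp/frozen_graph.pb", "rb") as f:
+    graph_def.ParseFromString(f.read())
+
+for node in graph_def.node:
+    print(node.name, node.op)
+```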
It is also possible to use the TensorFlow Optimizing Converter with protobufs
from either Python or from the command line (see the
@@ -204,16 +207,16 @@ The open source Android demo app uses the JNI interface and is available
[on GitHub](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/java/demo/app).
You can also download a
[prebuilt APK](http://download.tensorflow.org/deps/tflite/TfLiteCameraDemo.apk).
-See the @{$tflite/demo_android} guide for details.
+See the <a href="./demo_android.md">Android demo</a> guide for details.
-The @{$mobile/android_build} guide has instructions for installing TensorFlow on
-Android and setting up `bazel` and Android Studio.
+The <a href="./tfmobile/android_build.md">Android mobile</a> guide has instructions for
+installing TensorFlow on Android and setting up `bazel` and Android Studio.
### iOS
To integrate a TensorFlow model in an iOS app, see the
[TensorFlow Lite for iOS](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/g3doc/ios.md)
-guide and @{$tflite/demo_ios} guide.
+guide and <a href="./demo_ios.md">iOS demo</a> guide.
#### Core ML support
diff --git a/tensorflow/contrib/lite/g3doc/ios.md b/tensorflow/contrib/lite/g3doc/ios.md
index e0358a444d..d78d373ccf 100644
--- a/tensorflow/contrib/lite/g3doc/ios.md
+++ b/tensorflow/contrib/lite/g3doc/ios.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# TensorFlow Lite for iOS
## Building
diff --git a/tensorflow/contrib/lite/g3doc/models.md b/tensorflow/contrib/lite/g3doc/models.md
index 4e7d33a1b6..3292aece0e 100644
--- a/tensorflow/contrib/lite/g3doc/models.md
+++ b/tensorflow/contrib/lite/g3doc/models.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# List of Hosted Models
## Image classification (Float Models)
diff --git a/tensorflow/contrib/lite/g3doc/ops_versioning.md b/tensorflow/contrib/lite/g3doc/ops_versioning.md
index bd2f797e6c..b06f4fd3b8 100644
--- a/tensorflow/contrib/lite/g3doc/ops_versioning.md
+++ b/tensorflow/contrib/lite/g3doc/ops_versioning.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# TensorFlow Lite Ops Versioning
This document describes TensorFlow Lite's op versioning schema. Op
diff --git a/tensorflow/docs_src/mobile/tflite/index.md b/tensorflow/contrib/lite/g3doc/overview.md
index cc4af2a875..be60d7941a 100644
--- a/tensorflow/docs_src/mobile/tflite/index.md
+++ b/tensorflow/contrib/lite/g3doc/overview.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Introduction to TensorFlow Lite
TensorFlow Lite is TensorFlow’s lightweight solution for mobile and embedded
diff --git a/tensorflow/docs_src/mobile/tflite/performance.md b/tensorflow/contrib/lite/g3doc/performance.md
index 79bacaaa1b..613e9f97c3 100644
--- a/tensorflow/docs_src/mobile/tflite/performance.md
+++ b/tensorflow/contrib/lite/g3doc/performance.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Performance
This document lists TensorFlow Lite performance benchmarks when running well
diff --git a/tensorflow/contrib/lite/g3doc/rpi.md b/tensorflow/contrib/lite/g3doc/rpi.md
index ab50789307..cdc9172d87 100644
--- a/tensorflow/contrib/lite/g3doc/rpi.md
+++ b/tensorflow/contrib/lite/g3doc/rpi.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# TensorFlow Lite for Raspberry Pi
## Cross compiling
diff --git a/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md b/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
index 967259b7a6..0e8f4339fc 100644
--- a/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
+++ b/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# TensorFlow Lite & TensorFlow Compatibility Guide
TensorFlow Lite supports a number of TensorFlow operations used in common
diff --git a/tensorflow/docs_src/mobile/android_build.md b/tensorflow/contrib/lite/g3doc/tfmobile/android_build.md
index f4b07db459..76e16fc9db 100644
--- a/tensorflow/docs_src/mobile/android_build.md
+++ b/tensorflow/contrib/lite/g3doc/tfmobile/android_build.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Building TensorFlow on Android
To get you started working with TensorFlow on Android, we'll walk through two
@@ -91,7 +94,8 @@ using [ADB](https://developer.android.com/studio/command-line/adb.html). This
requires some knowledge of build systems and Android developer tools, but we'll
guide you through the basics here.
-- First, follow our instructions for @{$install/install_sources$installing from sources}.
+- First, follow our instructions for
+ <a href="https://www.tensorflow.org/install/install_sources">installing from sources</a>.
This will also guide you through installing Bazel and cloning the
TensorFlow code.
diff --git a/tensorflow/docs_src/mobile/mobile_intro.md b/tensorflow/contrib/lite/g3doc/tfmobile/index.md
index baad443308..bd047bfcec 100644
--- a/tensorflow/docs_src/mobile/mobile_intro.md
+++ b/tensorflow/contrib/lite/g3doc/tfmobile/index.md
@@ -1,4 +1,45 @@
-# Introduction to TensorFlow Mobile
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
+# Overview
+
+TensorFlow was designed to be a good deep learning solution for mobile
+platforms. Currently we have two solutions for deploying machine learning
+applications on mobile and embedded devices: TensorFlow for Mobile and
+<a href="../overview.md">TensorFlow Lite</a>.
+
+## TensorFlow Lite versus TensorFlow Mobile
+
+Here are a few of the differences between the two:
+
+- TensorFlow Lite is an evolution of TensorFlow Mobile. In most cases, apps
+ developed with TensorFlow Lite will have a smaller binary size, fewer
+ dependencies, and better performance.
+
+- TensorFlow Lite is in developer preview, so not all use cases are covered yet.
+ We recommend TensorFlow Mobile for production cases in the meantime.
+
+- TensorFlow Lite supports only a limited set of operators, so not all models
+ will work on it by default. TensorFlow for Mobile has a fuller set of
+ supported functionality.
+
+TensorFlow Lite provides better performance and a smaller binary size on mobile
+platforms, as well as the ability to leverage hardware acceleration when the
+platform supports it. In addition, it has far fewer dependencies, so it can be
+built for simpler, more constrained devices. TensorFlow Lite
+also allows targeting accelerators through the [Neural Networks
+API](https://developer.android.com/ndk/guides/neuralnetworks/index.html).
+
+TensorFlow Lite currently covers a limited set of operators. While TensorFlow
+for Mobile also supports only a constrained set of ops by default, it can in
+principle be customized to build the kernels for any operator used in
+TensorFlow. So use cases that TensorFlow Lite does not yet support should
+continue to use TensorFlow for Mobile. As TensorFlow Lite evolves, it will
+gain additional operators, and the decision will be easier to make.
+
+
+## Introduction to TensorFlow Mobile
TensorFlow was designed from the ground up to be a good deep learning solution
for mobile platforms like Android and iOS. This mobile guide should help you
@@ -167,7 +208,7 @@ interesting products possible.
TensorFlow runs on Ubuntu Linux, Windows 10, and OS X. For a list of all
supported operating systems and instructions to install TensorFlow, see
-@{$install$Installing Tensorflow}.
+<a href="https://www.tensorflow.org/install">Installing TensorFlow</a>.
Note that some of the sample code we provide for mobile TensorFlow requires you
to compile TensorFlow from source, so you’ll need more than just `pip install`
@@ -241,8 +282,3 @@ results you’ll see. It’s common for an algorithm to get great training accur
numbers but then fail to be useful within a real application because there’s a
mismatch between the dataset and real usage. Prototype end-to-end usage as soon
as possible to create a consistent user experience.
-
-## Next Steps
-
-We suggest you get started by building one of our demos for
-@{$mobile/android_build$Android} or @{$mobile/ios_build$iOS}.
diff --git a/tensorflow/docs_src/mobile/ios_build.md b/tensorflow/contrib/lite/g3doc/tfmobile/ios_build.md
index 4c84a1214a..6223707892 100644
--- a/tensorflow/docs_src/mobile/ios_build.md
+++ b/tensorflow/contrib/lite/g3doc/tfmobile/ios_build.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Building TensorFlow on iOS
## Using CocoaPods
diff --git a/tensorflow/docs_src/mobile/linking_libs.md b/tensorflow/contrib/lite/g3doc/tfmobile/linking_libs.md
index efef5dd0da..4c2071ed05 100644
--- a/tensorflow/docs_src/mobile/linking_libs.md
+++ b/tensorflow/contrib/lite/g3doc/tfmobile/linking_libs.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Integrating TensorFlow libraries
Once you have made some progress on a model that addresses the problem you’re
@@ -14,11 +17,11 @@ TensorFlow mobile demo apps.
After you've managed to build the examples, you'll probably want to call
TensorFlow from one of your existing applications. The very easiest way to do
-this is to use the Pod installation steps described
-@{$mobile/ios_build#using_cocoapods$here}, but if you want to build TensorFlow
-from source (for example to customize which operators are included) you'll need
-to break out TensorFlow as a framework, include the right header files, and link
-against the built libraries and dependencies.
+this is to use the Pod installation steps described in
+<a href="./ios_build.md">Building TensorFlow on iOS</a>, but if you want to build
+TensorFlow from source (for example to customize which operators are included)
+you'll need to break out TensorFlow as a framework, include the right header
+files, and link against the built libraries and dependencies.
### Android
@@ -82,10 +85,12 @@ recompile of the core.
To achieve this capability, TensorFlow uses a registration pattern in a lot of
places. In the code, it looks like this:
- class MulKernel : OpKernel {
- Status Compute(OpKernelContext* context) { … }
- };
- REGISTER_KERNEL(MulKernel, “Mul”);
+```
+class MulKernel : OpKernel {
+  Status Compute(OpKernelContext* context) { … }
+};
+REGISTER_KERNEL(MulKernel, "Mul");
+```
This would be in a standalone `.cc` file linked into your application, either
as part of the main set of kernels or as a separate custom library. The magic
@@ -101,15 +106,17 @@ doesn’t offer a good mechanism for doing this sort of registration, so we have
to resort to some tricky code. Under the hood, the macro is implemented so that
it produces something like this:
- class RegisterMul {
- public:
- RegisterMul() {
- global_kernel_registry()->Register(“Mul”, [](){
- return new MulKernel()
- });
- }
- };
- RegisterMul g_register_mul;
+```
+class RegisterMul {
+ public:
+  RegisterMul() {
+    global_kernel_registry()->Register("Mul", [](){
+      return new MulKernel();
+    });
+  }
+};
+RegisterMul g_register_mul;
+```
This sets up a class `RegisterMul` with a constructor that tells the global
kernel registry what function to call when somebody asks it how to create a
@@ -176,8 +183,10 @@ have an experimental script at [rename_protobuf.sh](https://github.com/tensorflo
You need to run this as part of the makefile build, after you’ve downloaded all
the dependencies:
- tensorflow/contrib/makefile/download_dependencies.sh
- tensorflow/contrib/makefile/rename_protobuf.sh
+```
+tensorflow/contrib/makefile/download_dependencies.sh
+tensorflow/contrib/makefile/rename_protobuf.sh
+```
## Calling the TensorFlow API
@@ -193,18 +202,20 @@ use case, while on iOS and Raspberry Pi you call directly into the C++ API.
Here’s what a typical Inference Library sequence looks like on Android:
- // Load the model from disk.
- TensorFlowInferenceInterface inferenceInterface =
- new TensorFlowInferenceInterface(assetManager, modelFilename);
+```
+// Load the model from disk.
+TensorFlowInferenceInterface inferenceInterface =
+    new TensorFlowInferenceInterface(assetManager, modelFilename);
- // Copy the input data into TensorFlow.
- inferenceInterface.feed(inputName, floatValues, 1, inputSize, inputSize, 3);
+// Copy the input data into TensorFlow.
+inferenceInterface.feed(inputName, floatValues, 1, inputSize, inputSize, 3);
- // Run the inference call.
- inferenceInterface.run(outputNames, logStats);
+// Run the inference call.
+inferenceInterface.run(outputNames, logStats);
- // Copy the output Tensor back into the output array.
- inferenceInterface.fetch(outputName, outputs);
+// Copy the output Tensor back into the output array.
+inferenceInterface.fetch(outputName, outputs);
+```
You can find the source of this code in the [Android examples](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/android/src/org/tensorflow/demo/TensorFlowImageClassifier.java#L107).
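+
+The same load, feed, run, and fetch sequence can be sanity-checked on a desktop
+machine before wiring it into an app. Here is a rough Python equivalent (the
+model path and the `input`/`output` node names are assumptions):
+
+```
+import numpy as np
+import tensorflow as tf
+
+# Load the frozen model.
+graph_def = tf.GraphDef()
+with open("/tmp/frozen_graph.pb", "rb") as f:
+    graph_def.ParseFromString(f.read())
+
+with tf.Session() as sess:
+    tf.import_graph_def(graph_def, name="")
+    # Feed input data, run the graph, and fetch the output tensor.
+    image = np.zeros((1, 224, 224, 3), dtype=np.float32)
+    outputs = sess.run("output:0", feed_dict={"input:0": image})
+print(outputs.shape)
+```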
@@ -212,27 +223,29 @@ You can find the source of this code in the [Android examples](https://github.co
Here’s the equivalent code for iOS and Raspberry Pi:
- // Load the model.
- PortableReadFileToProto(file_path, &tensorflow_graph);
-
- // Create a session from the model.
- tensorflow::Status s = session->Create(tensorflow_graph);
- if (!s.ok()) {
- LOG(FATAL) << "Could not create TensorFlow Graph: " << s;
- }
-
- // Run the model.
- std::string input_layer = "input";
- std::string output_layer = "output";
- std::vector<tensorflow::Tensor> outputs;
- tensorflow::Status run_status = session->Run({{input_layer, image_tensor}},
+```
+// Load the model.
+PortableReadFileToProto(file_path, &tensorflow_graph);
+
+// Create a session from the model.
+tensorflow::Status s = session->Create(tensorflow_graph);
+if (!s.ok()) {
+ LOG(FATAL) << "Could not create TensorFlow Graph: " << s;
+}
+
+// Run the model.
+std::string input_layer = "input";
+std::string output_layer = "output";
+std::vector<tensorflow::Tensor> outputs;
+tensorflow::Status run_status = session->Run({{input_layer, image_tensor}},
{output_layer}, {}, &outputs);
- if (!run_status.ok()) {
- LOG(FATAL) << "Running model failed: " << run_status;
- }
+if (!run_status.ok()) {
+ LOG(FATAL) << "Running model failed: " << run_status;
+}
- // Access the output data.
- tensorflow::Tensor* output = &outputs[0];
+// Access the output data.
+tensorflow::Tensor* output = &outputs[0];
+```
This is all based on the
[iOS sample code](https://www.tensorflow.org/code/tensorflow/examples/ios/simple/RunModelViewController.mm),
diff --git a/tensorflow/docs_src/mobile/optimizing.md b/tensorflow/contrib/lite/g3doc/tfmobile/optimizing.md
index 778e4d3a62..a0192c3541 100644
--- a/tensorflow/docs_src/mobile/optimizing.md
+++ b/tensorflow/contrib/lite/g3doc/tfmobile/optimizing.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Optimizing for mobile
There are some special issues that you have to deal with when you’re trying to
@@ -77,7 +80,7 @@ out of a mobile device's memory faster.
To understand how large your network will be on disk, start by looking at the
size on disk of your `GraphDef` file after you’ve run `freeze_graph` and
-`strip_unused_nodes` on it (see @{$mobile/prepare_models$Preparing models} for
+`strip_unused_nodes` on it (see <a href="./prepare_models.md">Preparing models</a> for
more details on these tools), since then it should only contain
inference-related nodes. To double-check that your results are as expected, run
the `summarize_graph` tool to see how many parameters are in constants:
@@ -103,7 +106,8 @@ you multiply the number of const parameters by four, you should get something
that’s close to the size of the file on disk. You can often get away with only
eight-bits per parameter with very little loss of accuracy in the final result,
so if your file size is too large you can try using
-@{$performance/quantization$quantize_weights} to transform the parameters down.
+<a href="https://www.tensorflow.org/performance/quantization">quantize_weights</a>
+to transform the parameters down.
bazel build tensorflow/tools/graph_transforms:transform_graph && \
bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
@@ -292,7 +296,8 @@ run it on a 64-bit ARM device:
You can interpret the results in exactly the same way as the desktop version
above. If you have any trouble figuring out what the right input and output
-names and types are, take a look at the @{$mobile/prepare_models$Preparing models}
+names and types are, take a look at the
+<a href="./prepare_models.md">Preparing models</a>
page for details about detecting these for your model, and look at the
`summarize_graph` tool which may give you
helpful information.
diff --git a/tensorflow/docs_src/mobile/prepare_models.md b/tensorflow/contrib/lite/g3doc/tfmobile/prepare_models.md
index 2b84dbb973..6b4e4a92bd 100644
--- a/tensorflow/docs_src/mobile/prepare_models.md
+++ b/tensorflow/contrib/lite/g3doc/tfmobile/prepare_models.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Preparing models for mobile deployment
The requirements for storing model information during training are very
@@ -255,8 +258,8 @@ The criteria for including ops and types fall into several categories:
These ops are trimmed by default to optimize for inference on mobile, but it is
possible to alter some build files to change the default. After altering the
build files, you will need to recompile TensorFlow. See below for more details
-on how to do this, and also see @{$mobile/optimizing#binary_size$Optimizing} for
-more on reducing your binary size.
+on how to do this, and also see <a href="./optimizing.md">optimizing binary size</a>
+for more on reducing your binary size.
### Locate the implementation
diff --git a/tensorflow/docs_src/mobile/README.md b/tensorflow/docs_src/mobile/README.md
new file mode 100644
index 0000000000..ecf4267265
--- /dev/null
+++ b/tensorflow/docs_src/mobile/README.md
@@ -0,0 +1,3 @@
+# TF Lite subsite
+
+This subsite directory lives in [tensorflow/contrib/lite/g3doc](../../contrib/lite/g3doc/).
diff --git a/tensorflow/docs_src/mobile/index.md b/tensorflow/docs_src/mobile/index.md
deleted file mode 100644
index 6032fcad02..0000000000
--- a/tensorflow/docs_src/mobile/index.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Overview
-
-TensorFlow was designed to be a good deep learning solution for mobile
-platforms. Currently we have two solutions for deploying machine learning
-applications on mobile and embedded devices:
-@{$mobile/mobile_intro$TensorFlow for Mobile} and @{$mobile/tflite$TensorFlow Lite}.
-
-## TensorFlow Lite versus TensorFlow Mobile
-
-Here are a few of the differences between the two:
-
-- TensorFlow Lite is an evolution of TensorFlow Mobile. In most cases, apps
- developed with TensorFlow Lite will have a smaller binary size, fewer
- dependencies, and better performance.
-
-- TensorFlow Lite supports only a limited set of operators, so not all models
- will work on it by default. TensorFlow for Mobile has a fuller set of
- supported functionality.
-
-TensorFlow Lite provides better performance and a small binary size on mobile
-platforms as well as the ability to leverage hardware acceleration if available
-on their platforms. In addition, it has many fewer dependencies so it can be
-built and hosted on simpler, more constrained device scenarios. TensorFlow Lite
-also allows targeting accelerators through the [Neural Networks
-API](https://developer.android.com/ndk/guides/neuralnetworks/index.html).
-
-TensorFlow Lite currently has coverage for a limited set of operators. While
-TensorFlow for Mobile supports only a constrained set of ops by default, in
-principle if you use an arbitrary operator in TensorFlow, it can be customized
-to build that kernel. Thus use cases which are not currently supported by
-TensorFlow Lite should continue to use TensorFlow for Mobile. As TensorFlow Lite
-evolves, it will gain additional operators, and the decision will be easier to
-make.
diff --git a/tensorflow/docs_src/mobile/leftnav_files b/tensorflow/docs_src/mobile/leftnav_files
deleted file mode 100644
index 97340ef7e1..0000000000
--- a/tensorflow/docs_src/mobile/leftnav_files
+++ /dev/null
@@ -1,15 +0,0 @@
-index.md
-### TensorFlow Lite
-tflite/index.md
-tflite/devguide.md
-tflite/demo_android.md
-tflite/demo_ios.md
-tflite/performance.md
->>>
-### TensorFlow Mobile
-mobile_intro.md
-android_build.md
-ios_build.md
-linking_libs.md
-prepare_models.md
-optimizing.md