author    Yifei Feng <yifeif@google.com>                    2018-02-22 14:24:57 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>   2018-02-22 14:29:27 -0800
commit    dce9a49c19f406ba45919e8c94474e55dc5ccd54 (patch)
tree      928db8a52603e00aef76985cda16b8bceb9debb2 /tensorflow/examples
parent    cb7e1963c625fd9713e7475d85621f95be6762f1 (diff)
Merge changes from github.
PiperOrigin-RevId: 186674197
Diffstat (limited to 'tensorflow/examples')
-rw-r--r--  tensorflow/examples/android/res/animator/color_animation.xml             |  30
-rw-r--r--  tensorflow/examples/android/src/org/tensorflow/demo/SpeechActivity.java  |  21
-rw-r--r--  tensorflow/examples/get_started/regression/imports85.py                  |  11
-rw-r--r--  tensorflow/examples/image_retraining/retrain.py                           |  55
-rw-r--r--  tensorflow/examples/speech_commands/label_wav_dir.py                      | 136
-rw-r--r--  tensorflow/examples/speech_commands/train.py                              |   6
-rw-r--r--  tensorflow/examples/udacity/5_word2vec.ipynb                              |   2
7 files changed, 240 insertions(+), 21 deletions(-)
diff --git a/tensorflow/examples/android/res/animator/color_animation.xml b/tensorflow/examples/android/res/animator/color_animation.xml
new file mode 100644
index 0000000000..891d8cc1d4
--- /dev/null
+++ b/tensorflow/examples/android/res/animator/color_animation.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?><!--
+ Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<set xmlns:android="http://schemas.android.com/apk/res/android"
+ android:ordering="sequentially">
+ <objectAnimator
+ android:propertyName="backgroundColor"
+ android:duration="375"
+ android:valueFrom="0x00b3ccff"
+ android:valueTo="0xffb3ccff"
+ android:valueType="colorType"/>
+ <objectAnimator
+ android:propertyName="backgroundColor"
+ android:duration="375"
+ android:valueFrom="0xffb3ccff"
+ android:valueTo="0x00b3ccff"
+ android:valueType="colorType"/>
+</set>
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/SpeechActivity.java b/tensorflow/examples/android/src/org/tensorflow/demo/SpeechActivity.java
index 184df1bdb4..1cddf3dc55 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/SpeechActivity.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/SpeechActivity.java
@@ -31,7 +31,8 @@ the RecognizeCommands helper class.
package org.tensorflow.demo;
-import android.animation.ValueAnimator;
+import android.animation.AnimatorInflater;
+import android.animation.AnimatorSet;
import android.app.Activity;
import android.content.pm.PackageManager;
import android.media.AudioFormat;
@@ -329,17 +330,13 @@ public class SpeechActivity extends Activity {
labelIndex = i;
}
}
- final View labelView = (View) labelsListView.getChildAt(labelIndex - 2);
- ValueAnimator colorAnimation =
- ValueAnimator.ofArgb(0x00b3ccff, 0xffb3ccff, 0x00b3ccff);
- colorAnimation.setDuration(750);
- colorAnimation.addUpdateListener(
- new ValueAnimator.AnimatorUpdateListener() {
- @Override
- public void onAnimationUpdate(ValueAnimator animator) {
- labelView.setBackgroundColor((int) animator.getAnimatedValue());
- }
- });
+ final View labelView = labelsListView.getChildAt(labelIndex - 2);
+
+ AnimatorSet colorAnimation =
+ (AnimatorSet)
+ AnimatorInflater.loadAnimator(
+ SpeechActivity.this, R.animator.color_animation);
+ colorAnimation.setTarget(labelView);
colorAnimation.start();
}
}
diff --git a/tensorflow/examples/get_started/regression/imports85.py b/tensorflow/examples/get_started/regression/imports85.py
index 6bee556eb8..4fdaceea9a 100644
--- a/tensorflow/examples/get_started/regression/imports85.py
+++ b/tensorflow/examples/get_started/regression/imports85.py
@@ -131,11 +131,12 @@ def dataset(y_name="price", train_fraction=0.7):
# booleans but we are dealing with symbolic tensors.
return ~in_training_set(line)
- base_dataset = (tf.contrib.data
- # Get the lines from the file.
- .TextLineDataset(path)
- # drop lines with question marks.
- .filter(has_no_question_marks))
+ base_dataset = (
+ tf.data
+ # Get the lines from the file.
+ .TextLineDataset(path)
+ # drop lines with question marks.
+ .filter(has_no_question_marks))
train = (base_dataset
# Take only the training-set lines.
diff --git a/tensorflow/examples/image_retraining/retrain.py b/tensorflow/examples/image_retraining/retrain.py
index 868310cbc0..25e09fecbf 100644
--- a/tensorflow/examples/image_retraining/retrain.py
+++ b/tensorflow/examples/image_retraining/retrain.py
@@ -41,7 +41,6 @@ The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
-
```bash
bazel build tensorflow/examples/image_retraining:retrain && \
bazel-bin/tensorflow/examples/image_retraining/retrain \
@@ -70,12 +69,14 @@ on resource-limited platforms, you can try the `--architecture` flag with a
Mobilenet model. For example:
Run floating-point version of mobilenet:
+
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos --architecture mobilenet_1.0_224
```
Run quantized version of mobilenet:
+
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos/ --architecture mobilenet_1.0_224_quantized
@@ -96,6 +97,12 @@ Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
+To use with TensorFlow Serving:
+
+```bash
+tensorflow_model_server --port=9000 --model_name=inception \
+ --model_base_path=/tmp/saved_models/
+```
"""
from __future__ import absolute_import
from __future__ import division
@@ -1004,6 +1011,45 @@ def add_jpeg_decoding(input_width, input_height, input_depth, input_mean,
return jpeg_data, mul_image
+def export_model(sess, architecture, saved_model_dir):
+ """Exports model for serving.
+
+ Args:
+ sess: Current active TensorFlow Session.
+ architecture: Model architecture.
+ saved_model_dir: Directory in which to save exported model and variables.
+ """
+ if architecture == 'inception_v3':
+ input_tensor = 'DecodeJpeg/contents:0'
+ elif architecture.startswith('mobilenet_'):
+ input_tensor = 'input:0'
+ else:
+ raise ValueError('Unknown architecture', architecture)
+ in_image = sess.graph.get_tensor_by_name(input_tensor)
+ inputs = {'image': tf.saved_model.utils.build_tensor_info(in_image)}
+
+ out_classes = sess.graph.get_tensor_by_name('final_result:0')
+ outputs = {'prediction': tf.saved_model.utils.build_tensor_info(out_classes)}
+
+ signature = tf.saved_model.signature_def_utils.build_signature_def(
+ inputs=inputs,
+ outputs=outputs,
+ method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
+
+ legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
+
+ # Save out the SavedModel.
+ builder = tf.saved_model.builder.SavedModelBuilder(saved_model_dir)
+ builder.add_meta_graph_and_variables(
+ sess, [tf.saved_model.tag_constants.SERVING],
+ signature_def_map={
+ tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
+ signature
+ },
+ legacy_init_op=legacy_init_op)
+ builder.save()
+
+
def main(_):
# Needed to make sure the logging output is visible.
# See https://github.com/tensorflow/tensorflow/issues/3047
@@ -1179,6 +1225,8 @@ def main(_):
with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
+ export_model(sess, FLAGS.architecture, FLAGS.saved_model_dir)
+
if __name__ == '__main__':
parser = argparse.ArgumentParser()
@@ -1362,5 +1410,10 @@ if __name__ == '__main__':
takes 128x128 images. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
for more information on Mobilenet.\
""")
+ parser.add_argument(
+ '--saved_model_dir',
+ type=str,
+ default='/tmp/saved_models/1/',
+ help='Where to save the exported graph.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
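The export_model() addition above writes a SavedModel under --saved_model_dir. As a rough sanity check outside of TensorFlow Serving, that directory can be loaded back into a plain TF 1.x session. The sketch below is illustrative only (not part of this commit): it assumes the default inception_v3 architecture (so the JPEG input tensor is 'DecodeJpeg/contents:0', as in export_model), the default /tmp/saved_models/1/ export path, and a hypothetical test image at /tmp/test.jpg.

```python
# Minimal sketch: reload the SavedModel written by export_model() and run one image.
# Assumes inception_v3 (input tensor 'DecodeJpeg/contents:0'), the default
# --saved_model_dir of /tmp/saved_models/1/, and a hypothetical /tmp/test.jpg.
import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
  tf.saved_model.loader.load(
      sess, [tf.saved_model.tag_constants.SERVING], '/tmp/saved_models/1/')
  with tf.gfile.FastGFile('/tmp/test.jpg', 'rb') as f:
    image_data = f.read()
  # 'final_result:0' is the softmax tensor exported as the 'prediction' output.
  predictions = sess.run('final_result:0',
                         {'DecodeJpeg/contents:0': image_data})
  print(predictions)
```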
diff --git a/tensorflow/examples/speech_commands/label_wav_dir.py b/tensorflow/examples/speech_commands/label_wav_dir.py
new file mode 100644
index 0000000000..a34db512dd
--- /dev/null
+++ b/tensorflow/examples/speech_commands/label_wav_dir.py
@@ -0,0 +1,136 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+r"""Runs a trained audio graph against WAVE files and reports the results.
+
+The model, labels and .wav files specified in the arguments will be loaded, and
+then the predictions from running the model against the audio data will be
+printed to the console. This is a useful script for sanity checking trained
+models, and as an example of how to use an audio model from Python.
+
+Here's an example of running it:
+
+python tensorflow/examples/speech_commands/label_wav_dir.py \
+--graph=/tmp/my_frozen_graph.pb \
+--labels=/tmp/speech_commands_train/conv_labels.txt \
+--wav_dir=/tmp/speech_dataset/left
+
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import glob
+import sys
+
+import tensorflow as tf
+
+# pylint: disable=unused-import
+from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
+# pylint: enable=unused-import
+
+FLAGS = None
+
+
+def load_graph(filename):
+ """Unpersists graph from file as default graph."""
+ with tf.gfile.FastGFile(filename, 'rb') as f:
+ graph_def = tf.GraphDef()
+ graph_def.ParseFromString(f.read())
+ tf.import_graph_def(graph_def, name='')
+
+
+def load_labels(filename):
+ """Read in labels, one label per line."""
+ return [line.rstrip() for line in tf.gfile.GFile(filename)]
+
+
+def run_graph(wav_dir, labels, input_layer_name, output_layer_name,
+ num_top_predictions):
+ """Runs the audio data through the graph and prints predictions."""
+ with tf.Session() as sess:
+ # Feed the audio data as input to the graph.
+ # predictions will contain a two-dimensional array, where one
+ # dimension represents the input image count, and the other has
+ # predictions per class
+ for wav_path in glob.glob(wav_dir + '/*.wav'):
+ if not wav_path or not tf.gfile.Exists(wav_path):
+ tf.logging.fatal('Audio file does not exist %s', wav_path)
+
+ with open(wav_path, 'rb') as wav_file:
+ wav_data = wav_file.read()
+
+ softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name)
+ predictions, = sess.run(softmax_tensor, {input_layer_name: wav_data})
+
+ # Sort to show labels in order of confidence
+ print('\n%s' % (wav_path.split('/')[-1]))
+ top_k = predictions.argsort()[-num_top_predictions:][::-1]
+ for node_id in top_k:
+ human_string = labels[node_id]
+ score = predictions[node_id]
+ print('%s (score = %.5f)' % (human_string, score))
+
+ return 0
+
+
+def label_wav(wav_dir, labels, graph, input_name, output_name, how_many_labels):
+ """Loads the model and labels, and runs the inference to print predictions."""
+ if not labels or not tf.gfile.Exists(labels):
+ tf.logging.fatal('Labels file does not exist %s', labels)
+
+ if not graph or not tf.gfile.Exists(graph):
+ tf.logging.fatal('Graph file does not exist %s', graph)
+
+ labels_list = load_labels(labels)
+
+ # load graph, which is stored in the default session
+ load_graph(graph)
+
+ run_graph(wav_dir, labels_list, input_name, output_name, how_many_labels)
+
+
+def main(_):
+ """Entry point for script, converts flags to arguments."""
+ label_wav(FLAGS.wav_dir, FLAGS.labels, FLAGS.graph, FLAGS.input_name,
+ FLAGS.output_name, FLAGS.how_many_labels)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '--wav_dir', type=str, default='', help='Directory of .wav files to be identified.')
+ parser.add_argument(
+ '--graph', type=str, default='', help='Model to use for identification.')
+ parser.add_argument(
+ '--labels', type=str, default='', help='Path to file containing labels.')
+ parser.add_argument(
+ '--input_name',
+ type=str,
+ default='wav_data:0',
+ help='Name of WAVE data input node in model.')
+ parser.add_argument(
+ '--output_name',
+ type=str,
+ default='labels_softmax:0',
+ help='Name of node outputting a prediction in the model.')
+ parser.add_argument(
+ '--how_many_labels',
+ type=int,
+ default=3,
+ help='Number of results to show.')
+
+ FLAGS, unparsed = parser.parse_known_args()
+ tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
diff --git a/tensorflow/examples/speech_commands/train.py b/tensorflow/examples/speech_commands/train.py
index a4e80041f8..07c1919347 100644
--- a/tensorflow/examples/speech_commands/train.py
+++ b/tensorflow/examples/speech_commands/train.py
@@ -357,12 +357,14 @@ if __name__ == '__main__':
'--window_size_ms',
type=float,
default=30.0,
- help='How long each spectrogram timeslice is',)
+ help='How long each spectrogram timeslice is.',
+ )
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
- help='How long each spectrogram timeslice is',)
+ help='How far to move in time between spectrogram timeslices.',
+ )
parser.add_argument(
'--dct_coefficient_count',
type=int,
diff --git a/tensorflow/examples/udacity/5_word2vec.ipynb b/tensorflow/examples/udacity/5_word2vec.ipynb
index 18c456cad7..3b43d1fb55 100644
--- a/tensorflow/examples/udacity/5_word2vec.ipynb
+++ b/tensorflow/examples/udacity/5_word2vec.ipynb
@@ -455,7 +455,7 @@
" \n",
" # Compute the similarity between minibatch examples and all embeddings.\n",
" # We use the cosine distance:\n",
- " norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n",
+ " norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))\n",
" normalized_embeddings = embeddings / norm\n",
" valid_embeddings = tf.nn.embedding_lookup(\n",
" normalized_embeddings, valid_dataset)\n",