about summary refs log tree commit diff homepage
path: root/tensorflow/examples/tutorials
diff options
context:
space:
mode:
author: Sanders Kleinfeld <skleinfeld@google.com> 2017-01-09 17:55:52 -0800
committer: TensorFlower Gardener <gardener@tensorflow.org> 2017-01-09 18:07:30 -0800
commit 3afdb06d64509ae99a157ce07955331352518160 (patch)
tree 633d37ab6b83b3a83cd7abf1178c6ff777157b7c /tensorflow/examples/tutorials
parent 61d0f1572c7701aa0e73c0bedd5c57b3f80f22e7 (diff)
Layers tutorial and corresponding example code.
Change: 144031391
Diffstat (limited to 'tensorflow/examples/tutorials')
-rw-r--r--tensorflow/examples/tutorials/layers/BUILD32
-rw-r--r--tensorflow/examples/tutorials/layers/cnn_mnist.py162
2 files changed, 194 insertions, 0 deletions
diff --git a/tensorflow/examples/tutorials/layers/BUILD b/tensorflow/examples/tutorials/layers/BUILD
new file mode 100644
index 0000000000..ba9e1f60e1
--- /dev/null
+++ b/tensorflow/examples/tutorials/layers/BUILD
@@ -0,0 +1,32 @@
+# Example Estimator model
+
+package(
+ default_visibility = ["//visibility:public"],
+)
+
+licenses(["notice"]) # Apache 2.0
+
+exports_files(["LICENSE"])
+
+py_binary(
+ name = "cnn_mnist",
+ srcs = [
+ "cnn_mnist.py",
+ ],
+ srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow:tensorflow_py",
+ ],
+)
+
+filegroup(
+ name = "all_files",
+ srcs = glob(
+ ["**/*"],
+ exclude = [
+ "**/METADATA",
+ "**/OWNERS",
+ ],
+ ),
+ visibility = ["//tensorflow:__subpackages__"],
+)
diff --git a/tensorflow/examples/tutorials/layers/cnn_mnist.py b/tensorflow/examples/tutorials/layers/cnn_mnist.py
new file mode 100644
index 0000000000..09dbffd517
--- /dev/null
+++ b/tensorflow/examples/tutorials/layers/cnn_mnist.py
@@ -0,0 +1,162 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.contrib import learn
+from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
+
+tf.logging.set_verbosity(tf.logging.INFO)
+
+
+def cnn_model_fn(features, labels, mode):
+ """Model function for CNN."""
+ # Input Layer
+ # Reshape X to 4-D tensor: [batch_size, width, height, channels]
+ # MNIST images are 28x28 pixels, and have one color channel
+ input_layer = tf.reshape(features, [-1, 28, 28, 1])
+
+ # Convolutional Layer #1
+ # Computes 32 features using a 5x5 filter with ReLU activation.
+ # Padding is added to preserve width and height.
+ # Input Tensor Shape: [batch_size, 28, 28, 1]
+ # Output Tensor Shape: [batch_size, 28, 28, 32]
+ conv1 = tf.layers.conv2d(
+ inputs=input_layer,
+ filters=32,
+ kernel_size=[5, 5],
+ padding="same",
+ activation=tf.nn.relu)
+
+ # Pooling Layer #1
+ # First max pooling layer with a 2x2 filter and stride of 2
+ # Input Tensor Shape: [batch_size, 28, 28, 32]
+ # Output Tensor Shape: [batch_size, 14, 14, 32]
+ pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
+
+ # Convolutional Layer #2
+ # Computes 64 features using a 5x5 filter.
+ # Padding is added to preserve width and height.
+ # Input Tensor Shape: [batch_size, 14, 14, 32]
+ # Output Tensor Shape: [batch_size, 14, 14, 64]
+ conv2 = tf.layers.conv2d(
+ inputs=pool1,
+ filters=64,
+ kernel_size=[5, 5],
+ padding="same",
+ activation=tf.nn.relu)
+
+ # Pooling Layer #2
+ # Second max pooling layer with a 2x2 filter and stride of 2
+ # Input Tensor Shape: [batch_size, 14, 14, 64]
+ # Output Tensor Shape: [batch_size, 7, 7, 64]
+ pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
+
+ # Flatten tensor into a batch of vectors
+ # Input Tensor Shape: [batch_size, 7, 7, 64]
+ # Output Tensor Shape: [batch_size, 7 * 7 * 64]
+ pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
+
+ # Dense Layer
+ # Densely connected layer with 1024 neurons
+ # Input Tensor Shape: [batch_size, 7 * 7 * 64]
+ # Output Tensor Shape: [batch_size, 1024]
+ dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
+
+ # Add dropout operation; 0.6 probability that element will be kept
+ dropout = tf.layers.dropout(
+ inputs=dense, rate=0.4, training=mode == learn.ModeKeys.TRAIN)
+
+ # Logits layer
+ # Input Tensor Shape: [batch_size, 1024]
+ # Output Tensor Shape: [batch_size, 10]
+ logits = tf.layers.dense(inputs=dropout, units=10)
+
+ loss = None
+ train_op = None
+
+ # Calculate Loss (for both TRAIN and EVAL modes)
+ if mode != learn.ModeKeys.INFER:
+ onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
+ loss = tf.losses.softmax_cross_entropy(
+ onehot_labels=onehot_labels, logits=logits)
+
+ # Configure the Training Op (for TRAIN mode)
+ if mode == learn.ModeKeys.TRAIN:
+ train_op = tf.contrib.layers.optimize_loss(
+ loss=loss,
+ global_step=tf.contrib.framework.get_global_step(),
+ learning_rate=0.001,
+ optimizer="SGD")
+
+ # Generate Predictions
+ predictions = {
+ "classes": tf.argmax(
+ input=logits, axis=1),
+ "probabilities": tf.nn.softmax(
+ logits, name="softmax_tensor")
+ }
+
+ # Return a ModelFnOps object
+ return model_fn_lib.ModelFnOps(
+ mode=mode, predictions=predictions, loss=loss, train_op=train_op)
+
+
+def main(unused_argv):
+ # Load training and eval data
+ mnist = learn.datasets.load_dataset("mnist")
+ train_data = mnist.train.images # Returns np.array
+ train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
+ eval_data = mnist.test.images # Returns np.array
+ eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
+
+ # Create the Estimator
+ mnist_classifier = learn.Estimator(
+ model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")
+
+ # Set up logging for predictions
+ # Log the values in the "Softmax" tensor with label "probabilities"
+ tensors_to_log = {"probabilities": "softmax_tensor"}
+ logging_hook = tf.train.LoggingTensorHook(
+ tensors=tensors_to_log, every_n_iter=50)
+
+ # Train the model
+ mnist_classifier.fit(
+ x=train_data,
+ y=train_labels,
+ batch_size=100,
+ steps=20000,
+ monitors=[logging_hook])
+
+ # Configure the accuracy metric for evaluation
+ metrics = {
+ "accuracy":
+ learn.metric_spec.MetricSpec(
+ metric_fn=tf.metrics.accuracy, prediction_key="classes"),
+ }
+
+ # Evaluate the model and print results
+ eval_results = mnist_classifier.evaluate(
+ x=eval_data, y=eval_labels, metrics=metrics)
+ print(eval_results)
+
+
+if __name__ == "__main__":
+ tf.app.run()