author    Mark Daoust <markdaoust@google.com>  2017-06-27 15:51:03 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2017-06-27 15:55:04 -0700
commit    824dae164fcce971689e621e28ce62a92a84fdd7 (patch)
tree      924db83495919803b44f0bc2414516fc79a578ad /tensorflow/examples/tutorials
parent    7dede8848d0903a17ded277392f39f6f452fed28 (diff)
Add name_scopes to mnist_deep.py for a cleaner graph layout.
PiperOrigin-RevId: 160338775
Diffstat (limited to 'tensorflow/examples/tutorials')
-rw-r--r--  tensorflow/examples/tutorials/mnist/mnist_deep.py | 61
1 file changed, 38 insertions(+), 23 deletions(-)
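The patch below wraps each stage of the model in tf.name_scope so that TensorBoard's graph view can collapse each stage into a single node. A minimal sketch of the effect, assuming TensorFlow 1.x (the names here are illustrative, not taken from the patched file):

import tensorflow as tf

# Ops created inside a name_scope are prefixed with the scope name,
# which is what lets TensorBoard group them into one collapsible node.
with tf.name_scope('conv1'):
  W = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
  b = tf.Variable(tf.constant(0.1, shape=[32]))

print(W.op.name)  # e.g. 'conv1/Variable'
print(b.op.name)  # e.g. 'conv1/Variable_1'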
diff --git a/tensorflow/examples/tutorials/mnist/mnist_deep.py b/tensorflow/examples/tutorials/mnist/mnist_deep.py
index 2896eee77d..4df925c1f3 100644
--- a/tensorflow/examples/tutorials/mnist/mnist_deep.py
+++ b/tensorflow/examples/tutorials/mnist/mnist_deep.py
@@ -52,42 +52,50 @@ def deepnn(x):
   # Reshape to use within a convolutional neural net.
   # Last dimension is for "features" - there is only one here, since images are
   # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
-  x_image = tf.reshape(x, [-1, 28, 28, 1])
+  with tf.name_scope('reshape'):
+    x_image = tf.reshape(x, [-1, 28, 28, 1])
 
   # First convolutional layer - maps one grayscale image to 32 feature maps.
-  W_conv1 = weight_variable([5, 5, 1, 32])
-  b_conv1 = bias_variable([32])
-  h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
+  with tf.name_scope('conv1'):
+    W_conv1 = weight_variable([5, 5, 1, 32])
+    b_conv1 = bias_variable([32])
+    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
 
   # Pooling layer - downsamples by 2X.
-  h_pool1 = max_pool_2x2(h_conv1)
+  with tf.name_scope('pool1'):
+    h_pool1 = max_pool_2x2(h_conv1)
 
   # Second convolutional layer -- maps 32 feature maps to 64.
-  W_conv2 = weight_variable([5, 5, 32, 64])
-  b_conv2 = bias_variable([64])
-  h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
+  with tf.name_scope('conv2'):
+    W_conv2 = weight_variable([5, 5, 32, 64])
+    b_conv2 = bias_variable([64])
+    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
 
   # Second pooling layer.
-  h_pool2 = max_pool_2x2(h_conv2)
+  with tf.name_scope('pool2'):
+    h_pool2 = max_pool_2x2(h_conv2)
 
   # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
   # is down to 7x7x64 feature maps -- maps this to 1024 features.
-  W_fc1 = weight_variable([7 * 7 * 64, 1024])
-  b_fc1 = bias_variable([1024])
+  with tf.name_scope('fc1'):
+    W_fc1 = weight_variable([7 * 7 * 64, 1024])
+    b_fc1 = bias_variable([1024])
 
-  h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
-  h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
+    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
+    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
 
   # Dropout - controls the complexity of the model, prevents co-adaptation of
   # features.
-  keep_prob = tf.placeholder(tf.float32)
-  h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
+  with tf.name_scope('dropout'):
+    keep_prob = tf.placeholder(tf.float32)
+    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
 
   # Map the 1024 features to 10 classes, one for each digit
-  W_fc2 = weight_variable([1024, 10])
-  b_fc2 = bias_variable([10])
+  with tf.name_scope('fc2'):
+    W_fc2 = weight_variable([1024, 10])
+    b_fc2 = bias_variable([10])
 
-  y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
+    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
   return y_conv, keep_prob
@@ -127,11 +135,18 @@ def main(_):
   # Build the graph for the deep net
   y_conv, keep_prob = deepnn(x)
 
-  cross_entropy = tf.reduce_mean(
-      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
-  train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
-  correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
-  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
+  with tf.name_scope('loss'):
+    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
+                                                            logits=y_conv)
+    cross_entropy = tf.reduce_mean(cross_entropy)
+
+  with tf.name_scope('adam_optimizer'):
+    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
+
+  with tf.name_scope('accuracy'):
+    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
+    correct_prediction = tf.cast(correct_prediction, tf.float32)
+    accuracy = tf.reduce_mean(correct_prediction)
 
   with tf.Session() as sess:
     sess.run(tf.global_variables_initializer())
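The grouping only becomes visible once the graph is written out for TensorBoard. A short, hypothetical addition (not part of this commit, assuming TensorFlow 1.x) that exports the default graph after it is built:

import tempfile

import tensorflow as tf

# Hypothetical snippet: write the default graph to a log directory so the
# name_scope grouping ('conv1', 'fc1', 'loss', ...) shows up as collapsible
# nodes in TensorBoard's graph tab.
graph_location = tempfile.mkdtemp()
print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
train_writer.close()

Pointing tensorboard --logdir at that directory then shows one node per name_scope instead of a flat sea of ops.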