Diffstat (limited to 'tensorflow/examples')
 tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py     |  2 +-
 tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py |  2 +-
 tensorflow/examples/image_retraining/retrain.py                           | 26 ++++++------
 tensorflow/examples/tutorials/mnist/fully_connected_feed.py               |  2 +-
 tensorflow/examples/tutorials/mnist/mnist.py                              |  2 +-
 tensorflow/examples/tutorials/mnist/mnist_with_summaries.py               | 33 +++++++------
 6 files changed, 33 insertions(+), 34 deletions(-)
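Note: this commit ports the example scripts from the deprecated top-level summary ops to the tf.summary module: tf.scalar_summary -> tf.summary.scalar, tf.histogram_summary -> tf.summary.histogram, tf.image_summary -> tf.summary.image, and tf.merge_all_summaries -> tf.summary.merge_all. The new ops derive their tags from the enclosing tf.name_scope, so variable_summaries no longer needs a name argument threaded through every call, and tags containing spaces ('cross entropy') become scope-safe names ('cross_entropy').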
diff --git a/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py b/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py
index d0482568a2..7795248f82 100644
--- a/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py
+++ b/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py
@@ -75,7 +75,7 @@ def run_training():
     eval_correct = mnist.evaluation(logits, labels)
 
     # Build the summary operation based on the TF collection of Summaries.
-    summary_op = tf.merge_all_summaries()
+    summary_op = tf.summary.merge_all()
 
     # Create a saver for writing training checkpoints.
     saver = tf.train.Saver()
diff --git a/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py b/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py
index d037b8731c..5325afbe60 100644
--- a/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py
+++ b/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py
@@ -81,7 +81,7 @@ def run_training():
     eval_correct = mnist.evaluation(logits, labels)
 
     # Build the summary operation based on the TF collection of Summaries.
-    summary_op = tf.merge_all_summaries()
+    summary_op = tf.summary.merge_all()
 
     # Create a saver for writing training checkpoints.
     saver = tf.train.Saver()
diff --git a/tensorflow/examples/image_retraining/retrain.py b/tensorflow/examples/image_retraining/retrain.py
index d52a23fd15..4f06cb8add 100644
--- a/tensorflow/examples/image_retraining/retrain.py
+++ b/tensorflow/examples/image_retraining/retrain.py
@@ -647,17 +647,17 @@ def add_input_distortions(flip_left_right, random_crop, random_scale,
   return jpeg_data, distort_result
 
 
-def variable_summaries(var, name):
+def variable_summaries(var):
   """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
   with tf.name_scope('summaries'):
     mean = tf.reduce_mean(var)
-    tf.scalar_summary('mean/' + name, mean)
+    tf.summary.scalar('mean', mean)
     with tf.name_scope('stddev'):
       stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
-    tf.scalar_summary('stddev/' + name, stddev)
-    tf.scalar_summary('max/' + name, tf.reduce_max(var))
-    tf.scalar_summary('min/' + name, tf.reduce_min(var))
-    tf.histogram_summary(name, var)
+    tf.summary.scalar('stddev', stddev)
+    tf.summary.scalar('max', tf.reduce_max(var))
+    tf.summary.scalar('min', tf.reduce_min(var))
+    tf.summary.histogram('histogram', var)
 
 
 def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
@@ -695,23 +695,23 @@ def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
   with tf.name_scope(layer_name):
     with tf.name_scope('weights'):
       layer_weights = tf.Variable(tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001), name='final_weights')
-      variable_summaries(layer_weights, layer_name + '/weights')
+      variable_summaries(layer_weights)
     with tf.name_scope('biases'):
       layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
-      variable_summaries(layer_biases, layer_name + '/biases')
+      variable_summaries(layer_biases)
     with tf.name_scope('Wx_plus_b'):
       logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
-      tf.histogram_summary(layer_name + '/pre_activations', logits)
+      tf.summary.histogram('pre_activations', logits)
 
   final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
-  tf.histogram_summary(final_tensor_name + '/activations', final_tensor)
+  tf.summary.histogram('activations', final_tensor)
 
   with tf.name_scope('cross_entropy'):
     cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
         logits, ground_truth_input)
     with tf.name_scope('total'):
       cross_entropy_mean = tf.reduce_mean(cross_entropy)
-    tf.scalar_summary('cross entropy', cross_entropy_mean)
+    tf.summary.scalar('cross_entropy', cross_entropy_mean)
 
   with tf.name_scope('train'):
     train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
@@ -738,7 +738,7 @@ def add_evaluation_step(result_tensor, ground_truth_tensor):
                                     tf.argmax(ground_truth_tensor, 1))
     with tf.name_scope('accuracy'):
       evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
-  tf.scalar_summary('accuracy', evaluation_step)
+  tf.summary.scalar('accuracy', evaluation_step)
   return evaluation_step
 
 
@@ -792,7 +792,7 @@ def main(_):
   evaluation_step = add_evaluation_step(final_tensor, ground_truth_input)
 
   # Merge all the summaries and write them out to /tmp/retrain_logs (by default)
-  merged = tf.merge_all_summaries()
+  merged = tf.summary.merge_all()
   train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',
                                         sess.graph)
   validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/validation')
diff --git a/tensorflow/examples/tutorials/mnist/fully_connected_feed.py b/tensorflow/examples/tutorials/mnist/fully_connected_feed.py
index 38ae88ee5b..4dbd43527b 100644
--- a/tensorflow/examples/tutorials/mnist/fully_connected_feed.py
+++ b/tensorflow/examples/tutorials/mnist/fully_connected_feed.py
@@ -139,7 +139,7 @@ def run_training():
     eval_correct = mnist.evaluation(logits, labels_placeholder)
 
     # Build the summary Tensor based on the TF collection of Summaries.
-    summary = tf.merge_all_summaries()
+    summary = tf.summary.merge_all()
 
     # Add the variable initializer Op.
     init = tf.initialize_all_variables()
diff --git a/tensorflow/examples/tutorials/mnist/mnist.py b/tensorflow/examples/tutorials/mnist/mnist.py
index 4960a55a7c..e97a6c48ef 100644
--- a/tensorflow/examples/tutorials/mnist/mnist.py
+++ b/tensorflow/examples/tutorials/mnist/mnist.py
@@ -118,7 +118,7 @@ def training(loss, learning_rate):
     train_op: The Op for training.
   """
   # Add a scalar summary for the snapshot loss.
-  tf.scalar_summary(loss.op.name, loss)
+  tf.summary.scalar('loss', loss)
   # Create the gradient descent optimizer with the given learning rate.
   optimizer = tf.train.GradientDescentOptimizer(learning_rate)
   # Create a variable to track the global step.
diff --git a/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py b/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py
index f1eff29718..0597d5149b 100644
--- a/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py
+++ b/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py
@@ -40,7 +40,6 @@ def train():
                                     fake_data=FLAGS.fake_data)
 
   sess = tf.InteractiveSession()
-
   # Create a multilayer model.
 
   # Input placeholders
@@ -50,7 +49,7 @@ def train():
 
   with tf.name_scope('input_reshape'):
     image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
-    tf.image_summary('input', image_shaped_input, 10)
+    tf.summary.image('input', image_shaped_input, 10)
 
   # We can't initialize these variables to 0 - the network will get stuck.
   def weight_variable(shape):
@@ -63,17 +62,17 @@ def train():
     initial = tf.constant(0.1, shape=shape)
     return tf.Variable(initial)
 
-  def variable_summaries(var, name):
-    """Attach a lot of summaries to a Tensor."""
+  def variable_summaries(var):
+    """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
     with tf.name_scope('summaries'):
       mean = tf.reduce_mean(var)
-      tf.scalar_summary('mean/' + name, mean)
+      tf.summary.scalar('mean', mean)
       with tf.name_scope('stddev'):
         stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
-      tf.scalar_summary('stddev/' + name, stddev)
-      tf.scalar_summary('max/' + name, tf.reduce_max(var))
-      tf.scalar_summary('min/' + name, tf.reduce_min(var))
-      tf.histogram_summary(name, var)
+      tf.summary.scalar('stddev', stddev)
+      tf.summary.scalar('max', tf.reduce_max(var))
+      tf.summary.scalar('min', tf.reduce_min(var))
+      tf.summary.histogram('histogram', var)
 
   def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
     """Reusable code for making a simple neural net layer.
@@ -87,22 +86,22 @@ def train():
       # This Variable will hold the state of the weights for the layer
       with tf.name_scope('weights'):
         weights = weight_variable([input_dim, output_dim])
-        variable_summaries(weights, layer_name + '/weights')
+        variable_summaries(weights)
       with tf.name_scope('biases'):
         biases = bias_variable([output_dim])
-        variable_summaries(biases, layer_name + '/biases')
+        variable_summaries(biases)
       with tf.name_scope('Wx_plus_b'):
         preactivate = tf.matmul(input_tensor, weights) + biases
-        tf.histogram_summary(layer_name + '/pre_activations', preactivate)
+        tf.summary.histogram('pre_activations', preactivate)
       activations = act(preactivate, name='activation')
-      tf.histogram_summary(layer_name + '/activations', activations)
+      tf.summary.histogram('activations', activations)
       return activations
 
   hidden1 = nn_layer(x, 784, 500, 'layer1')
 
   with tf.name_scope('dropout'):
     keep_prob = tf.placeholder(tf.float32)
-    tf.scalar_summary('dropout_keep_probability', keep_prob)
+    tf.summary.scalar('dropout_keep_probability', keep_prob)
     dropped = tf.nn.dropout(hidden1, keep_prob)
 
   # Do not apply softmax activation yet, see below.
@@ -122,7 +121,7 @@ def train():
     diff = tf.nn.softmax_cross_entropy_with_logits(y, y_)
     with tf.name_scope('total'):
       cross_entropy = tf.reduce_mean(diff)
-  tf.scalar_summary('cross entropy', cross_entropy)
+  tf.summary.scalar('cross_entropy', cross_entropy)
 
   with tf.name_scope('train'):
     train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
@@ -133,10 +132,10 @@ def train():
       correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
     with tf.name_scope('accuracy'):
       accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
-  tf.scalar_summary('accuracy', accuracy)
+  tf.summary.scalar('accuracy', accuracy)
 
   # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
-  merged = tf.merge_all_summaries()
+  merged = tf.summary.merge_all()
   train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',
                                         sess.graph)
   test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test')
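For reference, a minimal sketch of how the renamed ops fit together after this patch. It is illustrative only: the toy graph, the '/tmp/demo_logs' directory, and the single zero-filled batch are hypothetical, and the writer stays on tf.train.SummaryWriter exactly as the patch leaves it.

    import numpy as np
    import tensorflow as tf

    # A toy graph with new-style summaries attached.
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    with tf.name_scope('weights'):
      w = tf.Variable(tf.truncated_normal([784, 10], stddev=0.1))
      tf.summary.histogram('histogram', w)  # tag is scoped: 'weights/histogram'
    logits = tf.matmul(x, w)
    tf.summary.scalar('logit_mean', tf.reduce_mean(logits))

    merged = tf.summary.merge_all()  # was: tf.merge_all_summaries()

    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    writer = tf.train.SummaryWriter('/tmp/demo_logs', sess.graph)

    # One step: evaluate the merged summary op and write the result out.
    summary = sess.run(merged, feed_dict={x: np.zeros((1, 784), np.float32)})
    writer.add_summary(summary, 0)
    writer.close()

Because the new ops pick up the surrounding tf.name_scope, helpers like variable_summaries can emit fixed tags ('mean', 'stddev') and still produce distinct TensorBoard entries per layer.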