diff options
author | 2016-08-28 14:28:18 -0800 | |
---|---|---|
committer | 2016-08-28 15:32:53 -0700 | |
commit | aeac274ed160618afa242512c38d1fd496466136 (patch) | |
tree | 106c47339ebe08e6b97a990b724fd665e213d944 /tensorflow/examples/tutorials/mnist | |
parent | cf35735c11f1022cadd5a8536617276db813e4ec (diff) |
Extend softmax and logsoftmax to make them work on an arbitrary dimension of a non-scalar tensor.
Change: 131540860
Diffstat (limited to 'tensorflow/examples/tutorials/mnist')
-rw-r--r-- | tensorflow/examples/tutorials/mnist/mnist_with_summaries.py | 2 |
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py b/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py
index 23492e5122..868cfcc3e4 100644
--- a/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py
+++ b/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py
@@ -100,7 +100,7 @@ def train():
     with tf.name_scope('Wx_plus_b'):
       preactivate = tf.matmul(input_tensor, weights) + biases
       tf.histogram_summary(layer_name + '/pre_activations', preactivate)
-      activations = act(preactivate, 'activation')
+      activations = act(preactivate, name='activation')
       tf.histogram_summary(layer_name + '/activations', activations)
     return activations