author    Martin Wicke <wicke@google.com>                   2017-01-04 21:25:34 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>  2017-01-04 21:46:08 -0800
commit 333dc32ff79af21484695157f3d141dc776f7c02 (patch)
tree   b379bcaa56bfa54d12ea839fb7e62ab163490743 /tensorflow/examples
parent d9541696b068cfcc1fab66b03d0b8d605b64f14d (diff)
Change arg order for {softmax,sparse_softmax,sigmoid}_cross_entropy_with_logits to be (labels, predictions), and force use of named args to avoid accidents.
Change: 143629623
Diffstat (limited to 'tensorflow/examples')
-rw-r--r--  tensorflow/examples/image_retraining/retrain.py               | 2
-rw-r--r--  tensorflow/examples/tutorials/mnist/mnist.py                  | 5
-rw-r--r--  tensorflow/examples/tutorials/mnist/mnist_softmax.py          | 3
-rw-r--r--  tensorflow/examples/tutorials/mnist/mnist_with_summaries.py  | 2
-rw-r--r--  tensorflow/examples/udacity/2_fullyconnected.ipynb            | 4
-rw-r--r--  tensorflow/examples/udacity/4_convolutions.ipynb              | 2
-rw-r--r--  tensorflow/examples/udacity/6_lstm.ipynb                      | 2
7 files changed, 10 insertions, 10 deletions
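
For reference, a minimal sketch of the calling convention this commit enforces. The toy tensors are illustrative only (not taken from the diff), and the TF 1.x graph-mode API of this era is assumed:

    import tensorflow as tf

    # Toy batch: 2 examples, 3 classes.
    logits = tf.constant([[2.0, 1.0, 0.1],
                          [0.5, 2.5, 0.3]])  # raw, unscaled scores
    labels = tf.constant([[1.0, 0.0, 0.0],
                          [0.0, 1.0, 0.0]])  # one-hot targets

    # Old positional order, disallowed after this change:
    #   tf.nn.softmax_cross_entropy_with_logits(logits, labels)

    # New convention: keyword arguments required, labels listed first.
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))

    with tf.Session() as sess:
      print(sess.run(loss))

The same keyword-only convention applies to tf.nn.sparse_softmax_cross_entropy_with_logits and tf.nn.sigmoid_cross_entropy_with_logits; each hunk below migrates a caller to it.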
diff --git a/tensorflow/examples/image_retraining/retrain.py b/tensorflow/examples/image_retraining/retrain.py
index 0d5ba84c2d..c5518e2603 100644
--- a/tensorflow/examples/image_retraining/retrain.py
+++ b/tensorflow/examples/image_retraining/retrain.py
@@ -723,7 +723,7 @@ def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
   with tf.name_scope('cross_entropy'):
     cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
-        logits, ground_truth_input)
+        labels=ground_truth_input, logits=logits)
     with tf.name_scope('total'):
       cross_entropy_mean = tf.reduce_mean(cross_entropy)
   tf.summary.scalar('cross_entropy', cross_entropy_mean)
diff --git a/tensorflow/examples/tutorials/mnist/mnist.py b/tensorflow/examples/tutorials/mnist/mnist.py
index e97a6c48ef..d533697976 100644
--- a/tensorflow/examples/tutorials/mnist/mnist.py
+++ b/tensorflow/examples/tutorials/mnist/mnist.py
@@ -95,9 +95,8 @@ def loss(logits, labels):
   """
   labels = tf.to_int64(labels)
   cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
-      logits, labels, name='xentropy')
-  loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
-  return loss
+      labels=labels, logits=logits, name='xentropy')
+  return tf.reduce_mean(cross_entropy, name='xentropy_mean')


 def training(loss, learning_rate):
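
A companion sketch of the sparse variant this mnist.py hunk migrates, where labels are integer class indices rather than one-hot rows (toy values, same TF 1.x assumptions as above):

    import tensorflow as tf

    logits = tf.constant([[2.0, 1.0, 0.1],
                          [0.5, 2.5, 0.3]])       # raw scores
    labels = tf.constant([0, 1], dtype=tf.int64)  # class indices, not one-hot

    # Named arguments with labels first, mirroring the updated loss().
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')

    with tf.Session() as sess:
      print(sess.run(loss))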
diff --git a/tensorflow/examples/tutorials/mnist/mnist_softmax.py b/tensorflow/examples/tutorials/mnist/mnist_softmax.py
index 42a406d386..4fa89ff246 100644
--- a/tensorflow/examples/tutorials/mnist/mnist_softmax.py
+++ b/tensorflow/examples/tutorials/mnist/mnist_softmax.py
@@ -54,7 +54,8 @@ def main(_):
   #
   # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
   # outputs of 'y', and then average across the batch.
-  cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))
+  cross_entropy = tf.reduce_mean(
+      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
   train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

   sess = tf.InteractiveSession()
diff --git a/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py b/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py
index 83879d0807..ff78f151c3 100644
--- a/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py
+++ b/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py
@@ -119,7 +119,7 @@ def train():
     # So here we use tf.nn.softmax_cross_entropy_with_logits on the
     # raw outputs of the nn_layer above, and then average across
     # the batch.
-    diff = tf.nn.softmax_cross_entropy_with_logits(y, y_)
+    diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
     with tf.name_scope('total'):
       cross_entropy = tf.reduce_mean(diff)
   tf.summary.scalar('cross_entropy', cross_entropy)
diff --git a/tensorflow/examples/udacity/2_fullyconnected.ipynb b/tensorflow/examples/udacity/2_fullyconnected.ipynb
index 8a845171a4..a6a206307a 100644
--- a/tensorflow/examples/udacity/2_fullyconnected.ipynb
+++ b/tensorflow/examples/udacity/2_fullyconnected.ipynb
@@ -271,7 +271,7 @@
" # cross-entropy across all training examples: that's our loss.\n",
" logits = tf.matmul(tf_train_dataset, weights) + biases\n",
" loss = tf.reduce_mean(\n",
- " tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n",
+ " tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))\n",
" \n",
" # Optimizer.\n",
" # We are going to find the minimum of this loss using gradient descent.\n",
@@ -448,7 +448,7 @@
" # Training computation.\n",
" logits = tf.matmul(tf_train_dataset, weights) + biases\n",
" loss = tf.reduce_mean(\n",
- " tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n",
+ " tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))\n",
" \n",
" # Optimizer.\n",
" optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
diff --git a/tensorflow/examples/udacity/4_convolutions.ipynb b/tensorflow/examples/udacity/4_convolutions.ipynb
index 464d2c836e..d607dddbb2 100644
--- a/tensorflow/examples/udacity/4_convolutions.ipynb
+++ b/tensorflow/examples/udacity/4_convolutions.ipynb
@@ -286,7 +286,7 @@
" # Training computation.\n",
" logits = model(tf_train_dataset)\n",
" loss = tf.reduce_mean(\n",
- " tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n",
+ " tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))\n",
" \n",
" # Optimizer.\n",
" optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)\n",
diff --git a/tensorflow/examples/udacity/6_lstm.ipynb b/tensorflow/examples/udacity/6_lstm.ipynb
index 64e913acf8..7e78c5328f 100644
--- a/tensorflow/examples/udacity/6_lstm.ipynb
+++ b/tensorflow/examples/udacity/6_lstm.ipynb
@@ -576,7 +576,7 @@
" logits = tf.nn.xw_plus_b(tf.concat_v2(outputs, 0), w, b)\n",
" loss = tf.reduce_mean(\n",
" tf.nn.softmax_cross_entropy_with_logits(\n",
- " logits, tf.concat_v2(train_labels, 0)))\n",
+ " labels=tf.concat_v2(train_labels, 0), logits=logits))\n",
"\n",
" # Optimizer.\n",
" global_step = tf.Variable(0)\n",